From 956b7541f8eab9a32d7a1aaac44259fb95fd47f5 Mon Sep 17 00:00:00 2001 From: husharp Date: Fri, 8 Dec 2023 17:39:15 +0800 Subject: [PATCH] add Signed-off-by: husharp --- cmd/http-service/go.mod | 12 +- cmd/http-service/go.sum | 8 + cmd/http-service/pbgen/api/service.pb.go | 5 +- cmd/http-service/pbgen/api/service_grpc.pb.go | 1 + go.mod | 25 +- go.sum | 41 +- pkg/apis/go.mod | 3 +- pkg/apis/go.sum | 9 +- .../autoscaler/autoscaler_manager.go | 2 +- .../autoscaler/pdplan_autoscaler.go | 10 +- pkg/autoscaler/autoscaler/util.go | 23 +- pkg/client/go.mod | 3 +- pkg/client/go.sum | 8 +- pkg/controller/pd_control.go | 14 +- pkg/controller/pd_control_test.go | 12 +- pkg/controller/pod_control.go | 3 +- pkg/controller/pod_control_test.go | 58 +- pkg/controller/tidb_control.go | 2 +- pkg/controller/tidbcluster/pod_control.go | 31 +- .../tidbcluster/pod_control_test.go | 25 +- pkg/discovery/discovery.go | 7 +- pkg/discovery/discovery_test.go | 50 +- pkg/discovery/server/server_test.go | 3 +- pkg/manager/member/common_store_failover.go | 3 +- pkg/manager/member/pd_failover.go | 3 +- pkg/manager/member/pd_member_manager.go | 8 +- pkg/manager/member/pd_member_manager_test.go | 37 +- pkg/manager/member/pd_scaler.go | 12 +- pkg/manager/member/pd_upgrader.go | 4 +- pkg/manager/member/pd_upgrader_test.go | 5 +- pkg/manager/member/tidb_member_manager.go | 3 +- .../member/tidb_member_manager_test.go | 13 +- pkg/manager/member/tiflash_member_manager.go | 62 +- .../member/tiflash_member_manager_test.go | 500 +++++------- pkg/manager/member/tiflash_scaler.go | 3 +- pkg/manager/member/tiflash_scaler_test.go | 69 +- pkg/manager/member/tikv_member_manager.go | 43 +- .../member/tikv_member_manager_test.go | 528 +++++-------- pkg/manager/member/tikv_scaler.go | 12 +- pkg/manager/member/tikv_scaler_test.go | 149 ++-- pkg/manager/member/tikv_upgrader.go | 16 +- pkg/manager/member/tikv_upgrader_test.go | 7 +- pkg/pdapi/fake_pdapi.go | 420 +++++++--- pkg/pdapi/pd_config.go | 273 ------- pkg/pdapi/pd_control.go | 28 +- pkg/pdapi/pdapi.go | 718 ------------------ pkg/pdapi/pdapi_test.go | 476 +++--------- pkg/pdapi/pdutil.go | 140 +++- pkg/util/util.go | 5 +- tests/e2e/tidbcluster/tidbcluster.go | 13 +- .../util/proxiedpdclient/proxiedpdclient.go | 7 +- 51 files changed, 1368 insertions(+), 2544 deletions(-) delete mode 100644 pkg/pdapi/pd_config.go diff --git a/cmd/http-service/go.mod b/cmd/http-service/go.mod index 15f747a457d..4373c4a983a 100644 --- a/cmd/http-service/go.mod +++ b/cmd/http-service/go.mod @@ -61,21 +61,21 @@ require ( github.com/modern-go/reflect2 v1.0.2 // indirect github.com/mohae/deepcopy v0.0.0-20170603005431-491d3605edfb // indirect github.com/pelletier/go-toml/v2 v2.1.0 // indirect - github.com/pingcap/errors v0.11.4 // indirect + github.com/pingcap/errors v0.11.5-0.20211224045212-9687c2b0f87c // indirect github.com/pingcap/tiproxy/lib v0.0.0-20230907130944-eb5b4b9c9e79 // indirect github.com/prometheus/common v0.28.0 // indirect github.com/prometheus/prometheus v1.8.2 // indirect github.com/spf13/pflag v1.0.5 // indirect github.com/twitchyliquid64/golang-asm v0.15.1 // indirect github.com/ugorji/go/codec v1.2.11 // indirect - go.uber.org/atomic v1.9.0 // indirect + go.uber.org/atomic v1.10.0 // indirect go.uber.org/multierr v1.8.0 // indirect golang.org/x/arch v0.5.0 // indirect - golang.org/x/crypto v0.13.0 // indirect - golang.org/x/net v0.15.0 // indirect + golang.org/x/crypto v0.14.0 // indirect + golang.org/x/net v0.17.0 // indirect golang.org/x/oauth2 v0.12.0 // indirect - golang.org/x/sys 
v0.12.0 // indirect - golang.org/x/term v0.12.0 // indirect + golang.org/x/sys v0.13.0 // indirect + golang.org/x/term v0.13.0 // indirect golang.org/x/text v0.13.0 // indirect golang.org/x/time v0.0.0-20220210224613-90d013bbcef8 // indirect google.golang.org/appengine v1.6.7 // indirect diff --git a/cmd/http-service/go.sum b/cmd/http-service/go.sum index 6bb1db04308..3953172e602 100644 --- a/cmd/http-service/go.sum +++ b/cmd/http-service/go.sum @@ -267,6 +267,7 @@ github.com/pelletier/go-toml/v2 v2.1.0/go.mod h1:tJU2Z3ZkXwnxa4DPO899bsyIoywizdU github.com/pingcap/errors v0.11.0/go.mod h1:Oi8TUi2kEtXXLMJk9l1cGmz20kV3TaQ0usTwv5KuLY8= github.com/pingcap/errors v0.11.4 h1:lFuQV/oaUMGcD2tqt+01ROSmJs75VG1ToEOkZIZ4nE4= github.com/pingcap/errors v0.11.4/go.mod h1:Oi8TUi2kEtXXLMJk9l1cGmz20kV3TaQ0usTwv5KuLY8= +github.com/pingcap/errors v0.11.5-0.20211224045212-9687c2b0f87c/go.mod h1:X2r9ueLEUZgtx2cIogM0v4Zj5uvvzhuuiu7Pn8HzMPg= github.com/pingcap/log v1.1.1-0.20230317032135-a0d097d16e22 h1:2SOzvGvE8beiC1Y4g9Onkvu6UmuBBOeWRGQEjJaT/JY= github.com/pingcap/log v1.1.1-0.20230317032135-a0d097d16e22/go.mod h1:DWQW5jICDR7UJh4HtxXSM20Churx4CQL0fwL/SoOSA4= github.com/pingcap/tiproxy/lib v0.0.0-20230907130944-eb5b4b9c9e79 h1:q2ILYQoB7COkkkE+dA3hwIFHZHYxOwDBpR3AYenb/hM= @@ -336,9 +337,11 @@ go.opencensus.io v0.22.0/go.mod h1:+kGneAE2xo2IficOXnaByMWTGM9T73dGwxeWcUqIpI8= go.opencensus.io v0.22.2/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= go.opencensus.io v0.22.3/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= go.opencensus.io v0.22.4/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= +go.uber.org/atomic v1.6.0/go.mod h1:sABNBOSYdrvTF6hTgEIbc7YasKWGhgEQZyfxyTvoXHQ= go.uber.org/atomic v1.7.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc= go.uber.org/atomic v1.9.0 h1:ECmE8Bn/WFTYwEW/bpKD3M8VtR/zQVbavAoalC1PYyE= go.uber.org/atomic v1.9.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc= +go.uber.org/atomic v1.10.0/go.mod h1:LUxbIzbOniOlMKjJjyPfpl4v+PKK2cNJn91OQbhoJI0= go.uber.org/goleak v1.1.10/go.mod h1:8a7PlsEVH3e/a/GLqe5IIrQx6GzcnRmZEufDUTk4A7A= go.uber.org/goleak v1.1.11 h1:wy28qYRKZgnJTxGxvye5/wgWr1EKjmUDGYox5mGlRlI= go.uber.org/goleak v1.1.11/go.mod h1:cwTWslyiVhfpKIDGSZEM2HlOvcqm+tG4zioyIeLoqMQ= @@ -360,6 +363,7 @@ golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8U golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.13.0 h1:mvySKfSWJ+UKUii46M40LOvyWfN0s2U+46/jDd0e6Ck= golang.org/x/crypto v0.13.0/go.mod h1:y6Z2r+Rw4iayiXXAIxJIDAJ1zMW4yaTpebo8fPOliYc= +golang.org/x/crypto v0.14.0/go.mod h1:MVFd36DqK4CsrnJYDkBA3VC4m2GkXAM0PvzMCn4JQf4= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8= @@ -426,6 +430,7 @@ golang.org/x/net v0.0.0-20210525063256-abc453219eb5/go.mod h1:9nx3DQGgdP8bBQD5qx golang.org/x/net v0.0.0-20220127200216-cd36cc0744dd/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= golang.org/x/net v0.15.0 h1:ugBLEUaxABaB5AJqW9enI0ACdci2RUd4eP51NTBvuJ8= golang.org/x/net v0.15.0/go.mod h1:idbUs1IY1+zTqbi8yxTbhexhEEk5ur9LInksu6HrEpk= +golang.org/x/net v0.17.0/go.mod h1:NxSsAGuq816PNPmqtQdLE42eU2Fs7NoRIZrHJAlaCOE= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod 
h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= @@ -489,10 +494,12 @@ golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.12.0 h1:CM0HF96J0hcLAwsHPJZjfdNzs0gftsLfgKt57wWHJ0o= golang.org/x/sys v0.12.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.13.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= golang.org/x/term v0.12.0 h1:/ZfYdc3zq+q02Rv9vGqTeSItdzZTSNDmfTi0mBAuidU= golang.org/x/term v0.12.0/go.mod h1:owVbMEjm3cBLCHdkQu9b1opXd4ETQWc3BhuQGKgXgvU= +golang.org/x/term v0.13.0/go.mod h1:LTmsnFJwVN6bCy1rVCoS+qHT1HhALEFxKncY3WNNh4U= golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= @@ -523,6 +530,7 @@ golang.org/x/tools v0.0.0-20190628153133-6cdbf07be9d0/go.mod h1:/rFqwRUd4F7ZHNgw golang.org/x/tools v0.0.0-20190816200558-6889da9d5479/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20190911174233-4f2ddba30aff/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191012152004-8de300cfc20a/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191029041327-9cc4af7d6b2c/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191108193012-7d206e10da11/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191113191852-77e3bb0ad9e7/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191115202509-3a792d9c32b2/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= diff --git a/cmd/http-service/pbgen/api/service.pb.go b/cmd/http-service/pbgen/api/service.pb.go index f259ca49335..b7f48ae5584 100644 --- a/cmd/http-service/pbgen/api/service.pb.go +++ b/cmd/http-service/pbgen/api/service.pb.go @@ -7,13 +7,14 @@ package api import ( + reflect "reflect" + sync "sync" + _ "github.com/grpc-ecosystem/grpc-gateway/v2/protoc-gen-openapiv2/options" _ "google.golang.org/genproto/googleapis/api/annotations" protoreflect "google.golang.org/protobuf/reflect/protoreflect" protoimpl "google.golang.org/protobuf/runtime/protoimpl" structpb "google.golang.org/protobuf/types/known/structpb" - reflect "reflect" - sync "sync" ) const ( diff --git a/cmd/http-service/pbgen/api/service_grpc.pb.go b/cmd/http-service/pbgen/api/service_grpc.pb.go index 60ba1a55fc2..610bad3ed54 100644 --- a/cmd/http-service/pbgen/api/service_grpc.pb.go +++ b/cmd/http-service/pbgen/api/service_grpc.pb.go @@ -8,6 +8,7 @@ package api import ( context "context" + grpc "google.golang.org/grpc" codes "google.golang.org/grpc/codes" status "google.golang.org/grpc/status" diff --git a/go.mod b/go.mod index 91a0c24b131..0c88b127a45 100644 --- a/go.mod +++ b/go.mod @@ -31,12 +31,12 @@ require ( github.com/onsi/gomega v1.10.2 
github.com/openshift/generic-admission-server v1.14.1-0.20210422140326-da96454c926d github.com/pingcap/advanced-statefulset/client v1.17.1-0.20231124094705-00595b4ef4ac - github.com/pingcap/errors v0.11.4 + github.com/pingcap/errors v0.11.5-0.20211224045212-9687c2b0f87c github.com/pingcap/kvproto v0.0.0-20231122054644-fb0f5c2a0a10 github.com/pingcap/tidb-operator/pkg/apis v1.6.0-alpha.8 github.com/pingcap/tidb-operator/pkg/client v1.6.0-alpha.8 github.com/pingcap/tiproxy/lib v0.0.0-20230907130944-eb5b4b9c9e79 - github.com/prometheus/client_golang v1.11.0 + github.com/prometheus/client_golang v1.11.1 github.com/prometheus/client_model v0.2.0 github.com/prometheus/common v0.28.0 github.com/prometheus/prom2json v1.3.0 @@ -45,10 +45,10 @@ require ( github.com/sethvargo/go-password v0.2.0 github.com/spf13/cobra v1.5.0 github.com/spf13/pflag v1.0.5 - github.com/stretchr/testify v1.8.1 - github.com/tikv/pd v2.1.17+incompatible + github.com/stretchr/testify v1.8.2 + github.com/tikv/pd/client v0.0.0-00010101000000-000000000000 go.etcd.io/etcd/client/v3 v3.5.0 - go.uber.org/atomic v1.9.0 + go.uber.org/atomic v1.10.0 gocloud.dev v0.18.0 golang.org/x/sync v0.3.0 golang.org/x/time v0.0.0-20220210224613-90d013bbcef8 @@ -76,6 +76,7 @@ require ( cloud.google.com/go/compute v1.23.0 // indirect cloud.google.com/go/compute/metadata v0.2.3 // indirect cloud.google.com/go/iam v1.1.2 // indirect + github.com/benbjohnson/clock v1.3.0 // indirect github.com/coreos/go-systemd/v22 v22.3.2 // indirect github.com/felixge/httpsnoop v1.0.1 // indirect github.com/form3tech-oss/jwt-go v3.2.3+incompatible // indirect @@ -86,6 +87,7 @@ require ( github.com/josharian/intern v1.0.0 // indirect github.com/moby/spdystream v0.2.0 // indirect github.com/monochromegane/go-gitignore v0.0.0-20200626010858-205db1a8cc00 // indirect + github.com/pingcap/log v1.1.1-0.20221110025148-ca232912c9f3 // indirect github.com/xlab/treeprint v0.0.0-20181112141820-a009c3971eca // indirect go.etcd.io/etcd/api/v3 v3.5.0 // indirect go.opentelemetry.io/contrib v0.20.0 // indirect @@ -135,11 +137,11 @@ require ( github.com/blang/semver v3.5.1+incompatible // indirect github.com/cespare/xxhash/v2 v2.2.0 // indirect github.com/chai2010/gettext-go v0.0.0-20170215093142-bf70f2a70fb1 // indirect - github.com/coreos/go-semver v0.3.0 // indirect + github.com/coreos/go-semver v0.3.1 // indirect github.com/davecgh/go-spew v1.1.1 // indirect github.com/dimchansky/utfbom v1.1.0 // indirect github.com/docker/distribution v2.7.1+incompatible // indirect - github.com/docker/go-units v0.4.0 + github.com/docker/go-units v0.5.0 github.com/dsnet/compress v0.0.1 // indirect github.com/elazarl/goproxy v0.0.0-20190421051319-9d40249d3c2f // indirect; indirectload github.com/evanphx/json-patch v4.12.0+incompatible // indirect @@ -183,7 +185,6 @@ require ( github.com/opencontainers/go-digest v1.0.0 // indirect github.com/peterbourgon/diskv v2.0.1+incompatible // indirect github.com/pierrec/lz4 v2.0.5+incompatible // indirect - github.com/pingcap/check v0.0.0-20190102082844-67f458068fc8 // indirect github.com/pkg/errors v0.9.1 // indirect github.com/pmezard/go-difflib v1.0.0 // indirect github.com/prometheus/procfs v0.6.0 // indirect @@ -194,8 +195,8 @@ require ( github.com/xi2/xz v0.0.0-20171230120015-48954b6210f8 // indirect go.etcd.io/etcd/client/pkg/v3 v3.5.5 // indirect go.opencensus.io v0.24.0 // indirect - go.uber.org/multierr v1.8.0 - go.uber.org/zap v1.23.0 // indirect + go.uber.org/multierr v1.11.0 + go.uber.org/zap v1.24.0 // indirect golang.org/x/crypto v0.14.0 
// indirect golang.org/x/net v0.17.0 // indirect golang.org/x/oauth2 v0.11.0 // indirect @@ -209,7 +210,7 @@ require ( google.golang.org/protobuf v1.31.0 // indirect gopkg.in/inf.v0 v0.9.1 // indirect gopkg.in/ini.v1 v1.62.0 // indirect - gopkg.in/natefinch/lumberjack.v2 v2.0.0 // indirect + gopkg.in/natefinch/lumberjack.v2 v2.2.1 // indirect gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 // indirect gopkg.in/yaml.v3 v3.0.1 // indirect k8s.io/klog v1.0.0 // indirect @@ -221,6 +222,8 @@ require ( replace github.com/pingcap/tidb-operator/pkg/apis => ./pkg/apis +replace github.com/tikv/pd/client => github.com/HuSharp/pd/client v0.0.0-20231208071038-9ada30d08673 + replace github.com/pingcap/tidb-operator/pkg/client => ./pkg/client replace github.com/renstrom/dedent => github.com/lithammer/dedent v1.1.0 diff --git a/go.sum b/go.sum index db0568aabb2..b7c58468341 100644 --- a/go.sum +++ b/go.sum @@ -96,6 +96,8 @@ github.com/BurntSushi/toml v0.3.1 h1:WXkYYl6Yr3qBf1K79EBnL4mak0OimBfB0XUf9Vl28OQ github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo= github.com/GoogleCloudPlatform/cloudsql-proxy v0.0.0-20190605020000-c4ba1fdf4d36/go.mod h1:aJ4qN3TfrelA6NZ6AXsXRfmEVaYin3EDbSPJrKS8OXo= +github.com/HuSharp/pd/client v0.0.0-20231208071038-9ada30d08673 h1:kBFxvNFGqb120nKcI5OpE+oG6B0Pw5E2jp2dewNiiWM= +github.com/HuSharp/pd/client v0.0.0-20231208071038-9ada30d08673/go.mod h1:Q62tRX5idVgtiN1OKXht5BMGxYMa/rYzjkrKwjARUmA= github.com/MakeNowJust/heredoc v0.0.0-20171113091838-e9091a26100e h1:eb0Pzkt15Bm7f2FFYv7sjY7NPFi3cPkS3tv1CcrFBWA= github.com/MakeNowJust/heredoc v0.0.0-20171113091838-e9091a26100e/go.mod h1:64YHyfSL2R96J44Nlwm39UHepQbyR5q10x7iYa1ks2E= github.com/Masterminds/semver v1.4.2 h1:WBLTQ37jOCzSLtXNdoo8bNM8876KhNqOKvrlGITgsTc= @@ -151,8 +153,9 @@ github.com/aws/aws-sdk-go-v2/service/sts v1.16.13/go.mod h1:Ru3QVMLygVs/07UQ3YDu github.com/aws/smithy-go v1.12.1 h1:yQRC55aXN/y1W10HgwHle01DRuV9Dpf31iGkotjt3Ag= github.com/aws/smithy-go v1.12.1/go.mod h1:Tg+OJXh4MB2R/uN61Ko2f6hTZwB/ZYGOtib8J3gBHzA= github.com/benbjohnson/clock v1.0.3/go.mod h1:bGMdMPoPVvcYyt1gHDf4J2KE153Yf9BuiUKYMaxlTDM= -github.com/benbjohnson/clock v1.1.0 h1:Q92kusRqC1XV2MjkWETPvjJVqKetz1OzxZB7mHJLju8= github.com/benbjohnson/clock v1.1.0/go.mod h1:J11/hYXuz8f4ySSvYwY0FKfm+ezbsZBKZxNJlLklBHA= +github.com/benbjohnson/clock v1.3.0 h1:ip6w0uFQkncKQ979AypyG0ER7mqUSBdKLOgAle/AT8A= +github.com/benbjohnson/clock v1.3.0/go.mod h1:J11/hYXuz8f4ySSvYwY0FKfm+ezbsZBKZxNJlLklBHA= github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM= github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw= github.com/bgentry/speakeasy v0.1.0/go.mod h1:+zsyZBPWlz7T6j88CTgSN5bM796AkVf0kBD4zp0CCIs= @@ -184,8 +187,9 @@ github.com/coreos/etcd v3.3.10+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc github.com/coreos/go-etcd v2.0.0+incompatible/go.mod h1:Jez6KQU2B/sWsbdaef3ED8NzMklzPG4d5KIOhIy30Tk= github.com/coreos/go-oidc v2.1.0+incompatible/go.mod h1:CgnwVTmzoESiwO9qyAFEMiHoZ1nMCKZlZ9V6mm3/LKc= github.com/coreos/go-semver v0.2.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk= -github.com/coreos/go-semver v0.3.0 h1:wkHLiw0WNATZnSG7epLsujiMCgPAc9xhjJ4tgnAxmfM= github.com/coreos/go-semver v0.3.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk= +github.com/coreos/go-semver v0.3.1 h1:yi21YpKnrx1gt5R+la8n5WgS0kCrsPp33dmEyHReZr4= +github.com/coreos/go-semver 
v0.3.1/go.mod h1:irMmmIw/7yzSRPWryHsK7EYSg09caPQL03VsM8rvUec= github.com/coreos/go-systemd v0.0.0-20180511133405-39ca1b05acc7/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4= github.com/coreos/go-systemd v0.0.0-20190321100706-95778dfbb74e/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4= github.com/coreos/go-systemd/v22 v22.3.2 h1:D9/bQk5vlXQFZ6Kwuu6zaiXJ9oTPe68++AzAJc1DzSI= @@ -210,8 +214,9 @@ github.com/dimchansky/utfbom v1.1.0/go.mod h1:rO41eb7gLfo8SF1jd9F8HplJm1Fewwi4mQ github.com/docker/distribution v2.7.1+incompatible h1:a5mlkVzth6W5A4fOsS3D2EO5BUmsJpcB+cRlLU7cSug= github.com/docker/distribution v2.7.1+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w= github.com/docker/go-units v0.3.3/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk= -github.com/docker/go-units v0.4.0 h1:3uh0PgVws3nIA0Q+MwDC8yjEPf9zjRfZZWXZYDct3Tw= github.com/docker/go-units v0.4.0/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk= +github.com/docker/go-units v0.5.0 h1:69rxXcBk27SvSaaxTtLh/8llcHD8vYHT7WSdRZ/jvr4= +github.com/docker/go-units v0.5.0/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk= github.com/docker/spdystream v0.0.0-20160310174837-449fdfce4d96/go.mod h1:Qh8CwZgvJUkLughtfhJv5dyTYa91l1fOUCrgjqmcifM= github.com/docopt/docopt-go v0.0.0-20180111231733-ee0de3bc6815/go.mod h1:WwZ+bS3ebgob9U8Nd0kOddGdZWjyMGR8Wziv+TBNwSE= github.com/dsnet/compress v0.0.1 h1:PlZu0n3Tuv04TzpfPbrnI0HW/YwodEXDS+oPKahKF0Q= @@ -606,12 +611,13 @@ github.com/pierrec/lz4 v2.0.5+incompatible h1:2xWsjqPFWcplujydGg4WmhC/6fZqK42wMM github.com/pierrec/lz4 v2.0.5+incompatible/go.mod h1:pdkljMzZIN41W+lC3N2tnIh5sFi+IEE17M5jbnwPHcY= github.com/pingcap/advanced-statefulset/client v1.17.1-0.20231124094705-00595b4ef4ac h1:PHznG4cEPpDAltq8pCw+a5qly8c00yhTWqVb4OYDEKg= github.com/pingcap/advanced-statefulset/client v1.17.1-0.20231124094705-00595b4ef4ac/go.mod h1:pHQk/hK89qM+LTAkVowB9PgVBBhN/9F4mgBkkStSDPQ= -github.com/pingcap/check v0.0.0-20190102082844-67f458068fc8 h1:USx2/E1bX46VG32FIw034Au6seQ2fY9NEILmNh/UlQg= -github.com/pingcap/check v0.0.0-20190102082844-67f458068fc8/go.mod h1:B1+S9LNcuMyLH/4HMTViQOJevkGiik3wW2AN9zb2fNQ= -github.com/pingcap/errors v0.11.4 h1:lFuQV/oaUMGcD2tqt+01ROSmJs75VG1ToEOkZIZ4nE4= -github.com/pingcap/errors v0.11.4/go.mod h1:Oi8TUi2kEtXXLMJk9l1cGmz20kV3TaQ0usTwv5KuLY8= +github.com/pingcap/errors v0.11.0/go.mod h1:Oi8TUi2kEtXXLMJk9l1cGmz20kV3TaQ0usTwv5KuLY8= +github.com/pingcap/errors v0.11.5-0.20211224045212-9687c2b0f87c h1:xpW9bvK+HuuTmyFqUwr+jcCvpVkK7sumiz+ko5H9eq4= +github.com/pingcap/errors v0.11.5-0.20211224045212-9687c2b0f87c/go.mod h1:X2r9ueLEUZgtx2cIogM0v4Zj5uvvzhuuiu7Pn8HzMPg= github.com/pingcap/kvproto v0.0.0-20231122054644-fb0f5c2a0a10 h1:qnhfzwdWOy8oOSZYX7/aK9XKDs4hJ6P/Gg+s7Sr9VKY= github.com/pingcap/kvproto v0.0.0-20231122054644-fb0f5c2a0a10/go.mod h1:rXxWk2UnwfUhLXha1jxRWPADw9eMZGWEWCg92Tgmb/8= +github.com/pingcap/log v1.1.1-0.20221110025148-ca232912c9f3 h1:HR/ylkkLmGdSSDaD8IDP+SZrdhV1Kibl9KrHxJ9eciw= +github.com/pingcap/log v1.1.1-0.20221110025148-ca232912c9f3/go.mod h1:DWQW5jICDR7UJh4HtxXSM20Churx4CQL0fwL/SoOSA4= github.com/pingcap/tiproxy/lib v0.0.0-20230907130944-eb5b4b9c9e79 h1:q2ILYQoB7COkkkE+dA3hwIFHZHYxOwDBpR3AYenb/hM= github.com/pingcap/tiproxy/lib v0.0.0-20230907130944-eb5b4b9c9e79/go.mod h1:LL2iRLUCzFd/zThAfn3BTA4ywSzv1etnJsOIydzsokk= github.com/pkg/diff v0.0.0-20210226163009-20ebb0f2a09e/go.mod h1:pJLUxLENpZxwdsKMEsNbx1VGcRFpLqf3715MtcvvzbA= @@ -707,11 +713,10 @@ github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/ 
github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= -github.com/stretchr/testify v1.8.1 h1:w7B6lhMri9wdJUVmEZPGGhZzrYTPvgJArz7wNPgYKsk= github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= +github.com/stretchr/testify v1.8.2 h1:+h33VjcLVPDHtOdpUCuF+7gSuG3yGIftsP1YvFihtJ8= +github.com/stretchr/testify v1.8.2/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= github.com/tidwall/pretty v1.0.0/go.mod h1:XNkn88O1ChpSDQmQeStsy+sBenx6DDtFZJxhVysOjyk= -github.com/tikv/pd v2.1.17+incompatible h1:48YYz8r16tItl3fxHmSGxGC2UemO6/Xp3Yq0/G38SnE= -github.com/tikv/pd v2.1.17+incompatible/go.mod h1:v6C/D7ONC49SgjI4jbGnooSizvijaO/bdIm62DVR4tI= github.com/tmc/grpc-websocket-proxy v0.0.0-20170815181823-89b8d40f7ca8/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U= github.com/tmc/grpc-websocket-proxy v0.0.0-20190109142713-0ad062ec5ee5/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U= github.com/tmc/grpc-websocket-proxy v0.0.0-20201229170055-e5319fda7802 h1:uruHq4dN7GR16kFc5fp3d1RIYzJW5onx8Ybykw2YQFA= @@ -800,23 +805,26 @@ go.uber.org/atomic v1.3.2/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE= go.uber.org/atomic v1.4.0/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE= go.uber.org/atomic v1.6.0/go.mod h1:sABNBOSYdrvTF6hTgEIbc7YasKWGhgEQZyfxyTvoXHQ= go.uber.org/atomic v1.7.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc= -go.uber.org/atomic v1.9.0 h1:ECmE8Bn/WFTYwEW/bpKD3M8VtR/zQVbavAoalC1PYyE= go.uber.org/atomic v1.9.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc= +go.uber.org/atomic v1.10.0 h1:9qC72Qh0+3MqyJbAn8YU5xVq1frD8bn3JtD2oXtafVQ= +go.uber.org/atomic v1.10.0/go.mod h1:LUxbIzbOniOlMKjJjyPfpl4v+PKK2cNJn91OQbhoJI0= go.uber.org/goleak v1.1.10/go.mod h1:8a7PlsEVH3e/a/GLqe5IIrQx6GzcnRmZEufDUTk4A7A= go.uber.org/goleak v1.2.0 h1:xqgm/S+aQvhWFTtR0XK3Jvg7z8kGV8P4X14IzwN3Eqk= go.uber.org/goleak v1.2.0/go.mod h1:XJYK+MuIchqpmGmUSAzotztawfKvYLUIgg7guXrwVUo= go.uber.org/multierr v1.1.0/go.mod h1:wR5kodmAFQ0UK8QlbwjlSNy0Z68gJhDJUG5sjR94q/0= go.uber.org/multierr v1.5.0/go.mod h1:FeouvMocqHpRaaGuG9EjoKcStLC43Zu/fmqdUMPcKYU= go.uber.org/multierr v1.6.0/go.mod h1:cdWPpRnG4AhwMwsgIHip0KRBQjJy5kYEpYjJxpXp9iU= -go.uber.org/multierr v1.8.0 h1:dg6GjLku4EH+249NNmoIciG9N/jURbDG+pFlTkhzIC8= -go.uber.org/multierr v1.8.0/go.mod h1:7EAYxJLBy9rStEaz58O2t4Uvip6FSURkq8/ppBp95ak= +go.uber.org/multierr v1.7.0/go.mod h1:7EAYxJLBy9rStEaz58O2t4Uvip6FSURkq8/ppBp95ak= +go.uber.org/multierr v1.11.0 h1:blXXJkSxSSfBVBlC76pxqeO+LN3aDfLQo+309xJstO0= +go.uber.org/multierr v1.11.0/go.mod h1:20+QtiLqy0Nd6FdQB9TLXag12DsQkrbs3htMFfDN80Y= go.uber.org/tools v0.0.0-20190618225709-2cfd321de3ee/go.mod h1:vJERXedbb3MVM5f9Ejo0C68/HhF8uaILCdgjnY+goOA= go.uber.org/zap v1.8.0/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q= go.uber.org/zap v1.10.0/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q= go.uber.org/zap v1.15.0/go.mod h1:Mb2vm2krFEG5DV0W9qcHBYFtp/Wku1cvYaqPsS/WYfc= go.uber.org/zap v1.17.0/go.mod h1:MXVU+bhUf/A7Xi2HNOnopQOrmycQ5Ih87HtOu4q5SSo= -go.uber.org/zap v1.23.0 h1:OjGQ5KQDEUawVHxNwQgPpiypGHOxo2mNZsOqTak4fFY= -go.uber.org/zap v1.23.0/go.mod h1:D+nX8jyLsMHMYrln8A0rJjFt/T/9/bGgIhAqxv5URuY= +go.uber.org/zap v1.19.0/go.mod h1:xg/QME4nWcxGxrpdeYfq7UvYrLh66cuVKdrbD1XF/NI= +go.uber.org/zap v1.24.0 h1:FiJd5l1UOLj0wCgbSE0rwwXHzEdAZS6hiiSnxJN/D60= 
+go.uber.org/zap v1.24.0/go.mod h1:2kMP+WWQ8aoFoedH3T2sq6iJ2yDWpHbP0f6MQbS9Gkg= gocloud.dev v0.18.0 h1:HX6uFZYZs9tUP87jzoWgB8dl4ihsRpiAsBDKTthiApY= gocloud.dev v0.18.0/go.mod h1:lhLOb91+9tKB8RnNlsx+weJGEd0AHM94huK1bmrhPwM= golang.org/x/crypto v0.0.0-20181203042331-505ab145d0a9/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= @@ -1208,8 +1216,9 @@ gopkg.in/inf.v0 v0.9.1/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw= gopkg.in/ini.v1 v1.42.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k= gopkg.in/ini.v1 v1.62.0 h1:duBzk771uxoUuOlyRLkHsygud9+5lrlGjdFBb4mSKDU= gopkg.in/ini.v1 v1.62.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k= -gopkg.in/natefinch/lumberjack.v2 v2.0.0 h1:1Lc07Kr7qY4U2YPouBjpCLxpiyxIVoxqXgkXLknAOE8= gopkg.in/natefinch/lumberjack.v2 v2.0.0/go.mod h1:l0ndWWf7gzL7RNwBG7wST/UCcT4T24xpD6X8LsfU/+k= +gopkg.in/natefinch/lumberjack.v2 v2.2.1 h1:bBRl1b0OH9s/DuPhuXpNl+VtCaJXFZ5/uEFST95x9zc= +gopkg.in/natefinch/lumberjack.v2 v2.2.1/go.mod h1:YD8tP3GAjkrDg1eZH7EGmyESg/lsYskCTPBJVb9jqSc= gopkg.in/resty.v1 v1.12.0/go.mod h1:mDo4pnntr5jdWRML875a/NmxYqAlA73dVijT2AXvQQo= gopkg.in/square/go-jose.v2 v2.2.2/go.mod h1:M9dMgbHiYLoDGQrXy7OpJDJWiKiU//h+vD76mk0e1AI= gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 h1:uRGJdciOHaEIrze2W8Q3AKkepLTh2hOroT7a+7czfdQ= diff --git a/pkg/apis/go.mod b/pkg/apis/go.mod index 0cfb1a8ef1d..09fe9b54bae 100644 --- a/pkg/apis/go.mod +++ b/pkg/apis/go.mod @@ -9,7 +9,7 @@ require ( github.com/google/gofuzz v1.1.0 github.com/mohae/deepcopy v0.0.0-20170603005431-491d3605edfb github.com/onsi/gomega v1.10.2 - github.com/pingcap/errors v0.11.4 + github.com/pingcap/errors v0.11.5-0.20211224045212-9687c2b0f87c github.com/pingcap/tiproxy/lib v0.0.0-20230907130944-eb5b4b9c9e79 github.com/prometheus/common v0.28.0 github.com/prometheus/prometheus v1.8.2 @@ -39,6 +39,7 @@ require ( github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect github.com/modern-go/reflect2 v1.0.2 // indirect github.com/rogpeppe/go-internal v1.11.0 // indirect + go.uber.org/atomic v1.10.0 // indirect golang.org/x/net v0.17.0 // indirect golang.org/x/text v0.13.0 // indirect golang.org/x/xerrors v0.0.0-20220907171357-04be3eba64a2 // indirect diff --git a/pkg/apis/go.sum b/pkg/apis/go.sum index a5aceb01afe..9c44478e081 100644 --- a/pkg/apis/go.sum +++ b/pkg/apis/go.sum @@ -223,13 +223,12 @@ github.com/onsi/gomega v0.0.0-20170829124025-dcabb60a477c/go.mod h1:C1qb7wdrVGGV github.com/onsi/gomega v1.7.1/go.mod h1:XdKZgCCFLUoM/7CFJVPcG8C1xQ1AJ0vpAezJrB7JYyY= github.com/onsi/gomega v1.10.2 h1:aY/nuoWlKJud2J6U0E3NWsjlg+0GtwXxgEqthRdzlcs= github.com/onsi/gomega v1.10.2/go.mod h1:iN09h71vgCQne3DLsj+A5owkum+a2tYe+TOCB1ybHNo= -github.com/pingcap/errors v0.11.4 h1:lFuQV/oaUMGcD2tqt+01ROSmJs75VG1ToEOkZIZ4nE4= -github.com/pingcap/errors v0.11.4/go.mod h1:Oi8TUi2kEtXXLMJk9l1cGmz20kV3TaQ0usTwv5KuLY8= +github.com/pingcap/errors v0.11.5-0.20211224045212-9687c2b0f87c h1:xpW9bvK+HuuTmyFqUwr+jcCvpVkK7sumiz+ko5H9eq4= +github.com/pingcap/errors v0.11.5-0.20211224045212-9687c2b0f87c/go.mod h1:X2r9ueLEUZgtx2cIogM0v4Zj5uvvzhuuiu7Pn8HzMPg= github.com/pingcap/tiproxy/lib v0.0.0-20230907130944-eb5b4b9c9e79 h1:q2ILYQoB7COkkkE+dA3hwIFHZHYxOwDBpR3AYenb/hM= github.com/pingcap/tiproxy/lib v0.0.0-20230907130944-eb5b4b9c9e79/go.mod h1:LL2iRLUCzFd/zThAfn3BTA4ywSzv1etnJsOIydzsokk= github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= -github.com/pkg/errors v0.9.1 
h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= @@ -281,6 +280,9 @@ go.opencensus.io v0.22.0/go.mod h1:+kGneAE2xo2IficOXnaByMWTGM9T73dGwxeWcUqIpI8= go.opencensus.io v0.22.2/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= go.opencensus.io v0.22.3/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= go.opencensus.io v0.22.4/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= +go.uber.org/atomic v1.6.0/go.mod h1:sABNBOSYdrvTF6hTgEIbc7YasKWGhgEQZyfxyTvoXHQ= +go.uber.org/atomic v1.10.0 h1:9qC72Qh0+3MqyJbAn8YU5xVq1frD8bn3JtD2oXtafVQ= +go.uber.org/atomic v1.10.0/go.mod h1:LUxbIzbOniOlMKjJjyPfpl4v+PKK2cNJn91OQbhoJI0= golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= @@ -447,6 +449,7 @@ golang.org/x/tools v0.0.0-20190628153133-6cdbf07be9d0/go.mod h1:/rFqwRUd4F7ZHNgw golang.org/x/tools v0.0.0-20190816200558-6889da9d5479/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20190911174233-4f2ddba30aff/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191012152004-8de300cfc20a/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191029041327-9cc4af7d6b2c/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191113191852-77e3bb0ad9e7/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191115202509-3a792d9c32b2/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= diff --git a/pkg/autoscaler/autoscaler/autoscaler_manager.go b/pkg/autoscaler/autoscaler/autoscaler_manager.go index 749c05ca74a..deb6d6c5d1d 100644 --- a/pkg/autoscaler/autoscaler/autoscaler_manager.go +++ b/pkg/autoscaler/autoscaler/autoscaler_manager.go @@ -99,7 +99,7 @@ func (am *autoScalerManager) syncExternal(tc *v1alpha1.TidbCluster, tac *v1alpha func (am *autoScalerManager) syncPD(tc *v1alpha1.TidbCluster, tac *v1alpha1.TidbClusterAutoScaler, component v1alpha1.MemberType) error { strategy := autoscalerToStrategy(tac, component) // Request PD for auto-scaling plans - plans, err := controller.GetPDClient(am.deps.PDControl, tc).GetAutoscalingPlans(*strategy) + plans, err := controller.GetPDClient(am.deps.PDControl, tc).GetAutoscalingPlans(context.TODO(), *strategy) if err != nil { klog.Errorf("tac[%s/%s] cannot get auto-scaling plans for component %v err:%v", tac.Namespace, tac.Name, component, err) return err diff --git a/pkg/autoscaler/autoscaler/pdplan_autoscaler.go b/pkg/autoscaler/autoscaler/pdplan_autoscaler.go index e623c13880b..86f4070bd91 100644 --- a/pkg/autoscaler/autoscaler/pdplan_autoscaler.go +++ b/pkg/autoscaler/autoscaler/pdplan_autoscaler.go @@ -19,7 +19,7 @@ import ( "github.com/pingcap/tidb-operator/pkg/apis/label" "github.com/pingcap/tidb-operator/pkg/apis/pingcap/v1alpha1" - "github.com/pingcap/tidb-operator/pkg/pdapi" + pd "github.com/tikv/pd/client/http" corev1 "k8s.io/api/core/v1" metav1 
"k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/labels" @@ -53,9 +53,9 @@ func (am *autoScalerManager) getAutoScaledClusters(tac *v1alpha1.TidbClusterAuto return } -func (am *autoScalerManager) syncPlans(tc *v1alpha1.TidbCluster, tac *v1alpha1.TidbClusterAutoScaler, plans []pdapi.Plan, component v1alpha1.MemberType) error { +func (am *autoScalerManager) syncPlans(tc *v1alpha1.TidbCluster, tac *v1alpha1.TidbClusterAutoScaler, plans []pd.Plan, component v1alpha1.MemberType) error { planGroups := sets.String{} - groupPlanMap := make(map[string]pdapi.Plan) + groupPlanMap := make(map[string]pd.Plan) for _, plan := range plans { groupName := plan.Labels[groupLabelKey] planGroups.Insert(groupName) @@ -125,7 +125,7 @@ func (am *autoScalerManager) deleteAutoscalingClusters(tac *v1alpha1.TidbCluster return errorutils.NewAggregate(errs) } -func (am *autoScalerManager) updateAutoscalingClusters(tac *v1alpha1.TidbClusterAutoScaler, groupsToUpdate []string, groupTcMap map[string]*v1alpha1.TidbCluster, groupPlanMap map[string]pdapi.Plan) error { +func (am *autoScalerManager) updateAutoscalingClusters(tac *v1alpha1.TidbClusterAutoScaler, groupsToUpdate []string, groupTcMap map[string]*v1alpha1.TidbCluster, groupPlanMap map[string]pd.Plan) error { var errs []error for _, group := range groupsToUpdate { actual, oldTc, plan := groupTcMap[group].DeepCopy(), groupTcMap[group], groupPlanMap[group] @@ -164,7 +164,7 @@ func (am *autoScalerManager) updateAutoscalingClusters(tac *v1alpha1.TidbCluster return errorutils.NewAggregate(errs) } -func (am *autoScalerManager) createAutoscalingClusters(tc *v1alpha1.TidbCluster, tac *v1alpha1.TidbClusterAutoScaler, groupsToCreate []string, groupPlanMap map[string]pdapi.Plan) error { +func (am *autoScalerManager) createAutoscalingClusters(tc *v1alpha1.TidbCluster, tac *v1alpha1.TidbClusterAutoScaler, groupsToCreate []string, groupPlanMap map[string]pd.Plan) error { var errs []error for _, group := range groupsToCreate { plan := groupPlanMap[group] diff --git a/pkg/autoscaler/autoscaler/util.go b/pkg/autoscaler/autoscaler/util.go index c2eca9583c2..d7442d5408e 100644 --- a/pkg/autoscaler/autoscaler/util.go +++ b/pkg/autoscaler/autoscaler/util.go @@ -22,7 +22,8 @@ import ( "github.com/pingcap/tidb-operator/pkg/apis/label" "github.com/pingcap/tidb-operator/pkg/apis/pingcap/v1alpha1" "github.com/pingcap/tidb-operator/pkg/controller" - "github.com/pingcap/tidb-operator/pkg/pdapi" + pd "github.com/tikv/pd/client/http" + corev1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/api/resource" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -293,14 +294,14 @@ func validateTAC(tac *v1alpha1.TidbClusterAutoScaler) error { return nil } -func autoscalerToStrategy(tac *v1alpha1.TidbClusterAutoScaler, component v1alpha1.MemberType) *pdapi.Strategy { +func autoscalerToStrategy(tac *v1alpha1.TidbClusterAutoScaler, component v1alpha1.MemberType) *pd.Strategy { resources := getSpecResources(tac, component) - strategy := &pdapi.Strategy{ - Resources: make([]*pdapi.Resource, 0, len(resources)), + strategy := &pd.Strategy{ + Resources: make([]*pd.Resource, 0, len(resources)), } for typ, res := range resources { - resource := &pdapi.Resource{ + resource := &pd.Resource{ CPU: res.CPU.AsDec().UnscaledBig().Uint64(), Memory: res.Memory.AsDec().UnscaledBig().Uint64(), Storage: res.Storage.AsDec().UnscaledBig().Uint64(), @@ -315,16 +316,16 @@ func autoscalerToStrategy(tac *v1alpha1.TidbClusterAutoScaler, component v1alpha switch component { case v1alpha1.TiDBMemberType: - strategy.Rules = 
[]*pdapi.Rule{autoRulesToStrategyRule(component.String(), tac.Spec.TiDB.Rules)} + strategy.Rules = []*pd.StrategyRule{autoRulesToStrategyRule(component.String(), tac.Spec.TiDB.Rules)} case v1alpha1.TiKVMemberType: - strategy.Rules = []*pdapi.Rule{autoRulesToStrategyRule(component.String(), tac.Spec.TiKV.Rules)} + strategy.Rules = []*pd.StrategyRule{autoRulesToStrategyRule(component.String(), tac.Spec.TiKV.Rules)} } return strategy } -func autoRulesToStrategyRule(component string, rules map[corev1.ResourceName]v1alpha1.AutoRule) *pdapi.Rule { - result := &pdapi.Rule{ +func autoRulesToStrategyRule(component string, rules map[corev1.ResourceName]v1alpha1.AutoRule) *pd.StrategyRule { + result := &pd.StrategyRule{ Component: component, } for res, rule := range rules { @@ -332,14 +333,14 @@ func autoRulesToStrategyRule(component string, rules map[corev1.ResourceName]v1a case corev1.ResourceCPU: // For CPU rule, users should both specify max_threshold and min_threshold // Defaulting and validating make sure that the min_threshold is set - result.CPURule = &pdapi.CPURule{ + result.CPURule = &pd.CPURule{ MaxThreshold: rule.MaxThreshold, MinThreshold: *rule.MinThreshold, ResourceTypes: rule.ResourceTypes, } case corev1.ResourceStorage: // For storage rule, users need only set the max_threshold and we convert it to min_threshold for PD - result.StorageRule = &pdapi.StorageRule{ + result.StorageRule = &pd.StorageRule{ MinThreshold: 1.0 - rule.MaxThreshold, ResourceTypes: rule.ResourceTypes, } diff --git a/pkg/client/go.mod b/pkg/client/go.mod index 17881f50008..090f6c4576d 100644 --- a/pkg/client/go.mod +++ b/pkg/client/go.mod @@ -32,11 +32,12 @@ require ( github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect github.com/modern-go/reflect2 v1.0.2 // indirect github.com/mohae/deepcopy v0.0.0-20170603005431-491d3605edfb // indirect - github.com/pingcap/errors v0.11.4 // indirect + github.com/pingcap/errors v0.11.5-0.20211224045212-9687c2b0f87c // indirect github.com/pingcap/tiproxy/lib v0.0.0-20230907130944-eb5b4b9c9e79 // indirect github.com/pkg/errors v0.9.1 // indirect github.com/prometheus/common v0.28.0 // indirect github.com/prometheus/prometheus v1.8.2 // indirect + go.uber.org/atomic v1.10.0 // indirect golang.org/x/net v0.17.0 // indirect golang.org/x/oauth2 v0.11.0 // indirect golang.org/x/sys v0.13.0 // indirect diff --git a/pkg/client/go.sum b/pkg/client/go.sum index 3c819cfe577..8f31c6d5dff 100644 --- a/pkg/client/go.sum +++ b/pkg/client/go.sum @@ -223,8 +223,8 @@ github.com/onsi/ginkgo v1.14.0/go.mod h1:iSB4RoI2tjJc9BBv4NKIKWKya62Rps+oPG/Lv9k github.com/onsi/gomega v0.0.0-20170829124025-dcabb60a477c/go.mod h1:C1qb7wdrVGGVU+Z6iS04AVkA3Q65CEZX59MT0QO5uiA= github.com/onsi/gomega v1.10.2 h1:aY/nuoWlKJud2J6U0E3NWsjlg+0GtwXxgEqthRdzlcs= github.com/onsi/gomega v1.10.2/go.mod h1:iN09h71vgCQne3DLsj+A5owkum+a2tYe+TOCB1ybHNo= -github.com/pingcap/errors v0.11.4 h1:lFuQV/oaUMGcD2tqt+01ROSmJs75VG1ToEOkZIZ4nE4= -github.com/pingcap/errors v0.11.4/go.mod h1:Oi8TUi2kEtXXLMJk9l1cGmz20kV3TaQ0usTwv5KuLY8= +github.com/pingcap/errors v0.11.5-0.20211224045212-9687c2b0f87c h1:xpW9bvK+HuuTmyFqUwr+jcCvpVkK7sumiz+ko5H9eq4= +github.com/pingcap/errors v0.11.5-0.20211224045212-9687c2b0f87c/go.mod h1:X2r9ueLEUZgtx2cIogM0v4Zj5uvvzhuuiu7Pn8HzMPg= github.com/pingcap/tiproxy/lib v0.0.0-20230907130944-eb5b4b9c9e79 h1:q2ILYQoB7COkkkE+dA3hwIFHZHYxOwDBpR3AYenb/hM= github.com/pingcap/tiproxy/lib v0.0.0-20230907130944-eb5b4b9c9e79/go.mod h1:LL2iRLUCzFd/zThAfn3BTA4ywSzv1etnJsOIydzsokk= github.com/pkg/errors 
v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= @@ -281,6 +281,9 @@ go.opencensus.io v0.22.0/go.mod h1:+kGneAE2xo2IficOXnaByMWTGM9T73dGwxeWcUqIpI8= go.opencensus.io v0.22.2/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= go.opencensus.io v0.22.3/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= go.opencensus.io v0.22.4/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= +go.uber.org/atomic v1.6.0/go.mod h1:sABNBOSYdrvTF6hTgEIbc7YasKWGhgEQZyfxyTvoXHQ= +go.uber.org/atomic v1.10.0 h1:9qC72Qh0+3MqyJbAn8YU5xVq1frD8bn3JtD2oXtafVQ= +go.uber.org/atomic v1.10.0/go.mod h1:LUxbIzbOniOlMKjJjyPfpl4v+PKK2cNJn91OQbhoJI0= golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= @@ -448,6 +451,7 @@ golang.org/x/tools v0.0.0-20190628153133-6cdbf07be9d0/go.mod h1:/rFqwRUd4F7ZHNgw golang.org/x/tools v0.0.0-20190816200558-6889da9d5479/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20190911174233-4f2ddba30aff/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191012152004-8de300cfc20a/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191029041327-9cc4af7d6b2c/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191113191852-77e3bb0ad9e7/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191115202509-3a792d9c32b2/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= diff --git a/pkg/controller/pd_control.go b/pkg/controller/pd_control.go index bd812d03b13..4d657b5daaf 100644 --- a/pkg/controller/pd_control.go +++ b/pkg/controller/pd_control.go @@ -14,12 +14,15 @@ package controller import ( + "context" + "github.com/pingcap/tidb-operator/pkg/apis/pingcap/v1alpha1" "github.com/pingcap/tidb-operator/pkg/pdapi" + pd "github.com/tikv/pd/client/http" ) // getPDClientFromService gets the pd client from the TidbCluster -func getPDClientFromService(pdControl pdapi.PDControlInterface, tc *v1alpha1.TidbCluster) pdapi.PDClient { +func getPDClientFromService(pdControl pdapi.PDControlInterface, tc *v1alpha1.TidbCluster) pd.Client { if tc.Heterogeneous() && tc.WithoutLocalPD() { return pdControl.GetPDClient(pdapi.Namespace(tc.Spec.Cluster.Namespace), tc.Spec.Cluster.Name, tc.IsTLSClusterEnabled(), pdapi.TLSCertFromTC(pdapi.Namespace(tc.GetNamespace()), tc.GetName()), @@ -35,21 +38,22 @@ func getPDClientFromService(pdControl pdapi.PDControlInterface, tc *v1alpha1.Tid // build another one with the ClientURL in the PeerMembers. 
// ClientURL example: // ClientURL: https://cluster2-pd-0.cluster2-pd-peer.pingcap.svc.cluster2.local -func GetPDClient(pdControl pdapi.PDControlInterface, tc *v1alpha1.TidbCluster) pdapi.PDClient { +func GetPDClient(pdControl pdapi.PDControlInterface, tc *v1alpha1.TidbCluster) pd.Client { pdClient := getPDClientFromService(pdControl, tc) if len(tc.Status.PD.PeerMembers) == 0 { return pdClient } - _, err := pdClient.GetHealth() + ctx := context.TODO() + _, err := pdClient.GetHealth(ctx) if err == nil { return pdClient } for _, pdMember := range tc.Status.PD.PeerMembers { pdPeerClient := pdControl.GetPDClient(pdapi.Namespace(tc.GetNamespace()), tc.GetName(), tc.IsTLSClusterEnabled(), pdapi.SpecifyClient(pdMember.ClientURL, pdMember.Name)) - _, err := pdPeerClient.GetHealth() + _, err := pdPeerClient.GetHealth(ctx) if err == nil { return pdPeerClient } @@ -69,7 +73,7 @@ func NewFakePDClient(pdControl *pdapi.FakePDControl, tc *v1alpha1.TidbCluster) * return pdClient } -// NewFakePDClient creates a fake pdclient that is set as the pd client +// NewFakePDClientWithAddress creates a fake pdclient that is set as the pd client func NewFakePDClientWithAddress(pdControl *pdapi.FakePDControl, peerURL string) *pdapi.FakePDClient { pdClient := pdapi.NewFakePDClient() pdControl.SetPDClientWithAddress(peerURL, pdClient) diff --git a/pkg/controller/pd_control_test.go b/pkg/controller/pd_control_test.go index 51c33ad8c9f..17262fda055 100644 --- a/pkg/controller/pd_control_test.go +++ b/pkg/controller/pd_control_test.go @@ -14,12 +14,14 @@ package controller import ( + "context" "fmt" "testing" . "github.com/onsi/gomega" "github.com/pingcap/tidb-operator/pkg/apis/pingcap/v1alpha1" "github.com/pingcap/tidb-operator/pkg/pdapi" + pd "github.com/tikv/pd/client/http" kubeinformers "k8s.io/client-go/informers" kubefake "k8s.io/client-go/kubernetes/fake" ) @@ -52,12 +54,12 @@ func TestGetPDClient(t *testing.T) { pdClientCluster1 := NewFakePDClient(pdControl, tc) pdClientCluster1.AddReaction(pdapi.GetHealthActionType, func(action *pdapi.Action) (interface{}, error) { - return &pdapi.HealthInfo{Healths: []pdapi.MemberHealth{ + return &pd.HealthInfo{Healths: []pd.MemberHealth{ {Name: "pd-0", MemberID: uint64(1), ClientUrls: []string{"http://pd-0.pd.pingcap.cluster1.com:2379"}, Health: true}, }}, nil }) pdClient := GetPDClient(pdControl, tc) - _, err := pdClient.GetHealth() + _, err := pdClient.GetHealth(context.TODO()) g.Expect(err).To(BeNil()) }, }, @@ -76,12 +78,12 @@ func TestGetPDClient(t *testing.T) { }) pdClientCluster2 := NewFakePDClientWithAddress(pdControl, "pd-0") pdClientCluster2.AddReaction(pdapi.GetHealthActionType, func(action *pdapi.Action) (interface{}, error) { - return &pdapi.HealthInfo{Healths: []pdapi.MemberHealth{ + return &pd.HealthInfo{Healths: []pd.MemberHealth{ {Name: "pd-0", MemberID: uint64(1), ClientUrls: []string{"http://pd-0.pd.pingcap.cluster2.com:2379"}, Health: true}, }}, nil }) pdClient := GetPDClient(pdControl, tc) - _, err := pdClient.GetHealth() + _, err := pdClient.GetHealth(context.TODO()) g.Expect(err).To(BeNil()) }, }, @@ -103,7 +105,7 @@ func TestGetPDClient(t *testing.T) { return nil, fmt.Errorf("Fake cluster 2 PD crashed") }) pdClient := GetPDClient(pdControl, tc) - _, err := pdClient.GetHealth() + _, err := pdClient.GetHealth(context.TODO()) g.Expect(err).To(HaveOccurred()) }, }, diff --git a/pkg/controller/pod_control.go b/pkg/controller/pod_control.go index 2b596a06e3f..a6ec782cf5f 100644 --- a/pkg/controller/pod_control.go +++ b/pkg/controller/pod_control.go @@ -132,8 
+132,9 @@ func (c *realPodControl) UpdateMetaInfo(tc *v1alpha1.TidbCluster, pod *corev1.Po pdClient := GetPDClient(c.pdControl, tc) + ctx := context.TODO() if labels[label.ClusterIDLabelKey] == "" { - cluster, err := pdClient.GetCluster() + cluster, err := pdClient.GetCluster(ctx) if err != nil { return pod, fmt.Errorf("failed to get tidb cluster info from pd, TidbCluster: %s/%s, err: %v", ns, tcName, err) } diff --git a/pkg/controller/pod_control_test.go b/pkg/controller/pod_control_test.go index f14747dc255..107dcbe0a44 100644 --- a/pkg/controller/pod_control_test.go +++ b/pkg/controller/pod_control_test.go @@ -25,6 +25,8 @@ import ( "github.com/pingcap/tidb-operator/pkg/apis/label" "github.com/pingcap/tidb-operator/pkg/apis/pingcap/v1alpha1" "github.com/pingcap/tidb-operator/pkg/pdapi" + pd "github.com/tikv/pd/client/http" + corev1 "k8s.io/api/core/v1" apierrors "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -73,7 +75,7 @@ func TestPodControlUpdateMetaInfo(t *testing.T) { return cluster, nil }) pdClient.AddReaction(pdapi.GetMembersActionType, func(action *pdapi.Action) (interface{}, error) { - membersInfo := &pdapi.MembersInfo{ + membersInfo := &pd.MembersInfo{ Members: []*pdpb.Member{ { MemberId: 111, @@ -83,14 +85,12 @@ func TestPodControlUpdateMetaInfo(t *testing.T) { return membersInfo, nil }) pdClient.AddReaction(pdapi.GetStoresActionType, func(action *pdapi.Action) (interface{}, error) { - storesInfo := &pdapi.StoresInfo{ - Stores: []*pdapi.StoreInfo{ + storesInfo := &pd.StoresInfo{ + Stores: []pd.StoreInfo{ { - Store: &pdapi.MetaStore{ - Store: &metapb.Store{ - Id: 333, - Address: fmt.Sprintf("%s.web", TestPodName), - }, + Store: pd.MetaStore{ + ID: 333, + Address: fmt.Sprintf("%s.web", TestPodName), }, }, }, @@ -209,7 +209,7 @@ func TestPodControlUpdateMetaInfoSuccess(t *testing.T) { return cluster, nil }) pdClient.AddReaction(pdapi.GetMembersActionType, func(action *pdapi.Action) (interface{}, error) { - membersInfo := &pdapi.MembersInfo{ + membersInfo := &pd.MembersInfo{ Members: []*pdpb.Member{ { MemberId: 111, @@ -219,14 +219,12 @@ func TestPodControlUpdateMetaInfoSuccess(t *testing.T) { return membersInfo, nil }) pdClient.AddReaction(pdapi.GetStoresActionType, func(action *pdapi.Action) (interface{}, error) { - storesInfo := &pdapi.StoresInfo{ - Stores: []*pdapi.StoreInfo{ + storesInfo := &pd.StoresInfo{ + Stores: []pd.StoreInfo{ { - Store: &pdapi.MetaStore{ - Store: &metapb.Store{ - Id: 333, - Address: fmt.Sprintf("%s.web", TestPodName), - }, + Store: pd.MetaStore{ + ID: 333, + Address: fmt.Sprintf("%s.web", TestPodName), }, }, }, @@ -252,7 +250,7 @@ func TestPodControlUpdateMetaInfoGetClusterFailed(t *testing.T) { return nil, errors.New("failed to get cluster info from PD server") }) pdClient.AddReaction(pdapi.GetMembersActionType, func(action *pdapi.Action) (interface{}, error) { - membersInfo := &pdapi.MembersInfo{ + membersInfo := &pd.MembersInfo{ Members: []*pdpb.Member{ { MemberId: 111, @@ -262,14 +260,12 @@ func TestPodControlUpdateMetaInfoGetClusterFailed(t *testing.T) { return membersInfo, nil }) pdClient.AddReaction(pdapi.GetStoresActionType, func(action *pdapi.Action) (interface{}, error) { - storesInfo := &pdapi.StoresInfo{ - Stores: []*pdapi.StoreInfo{ + storesInfo := &pd.StoresInfo{ + Stores: []pd.StoreInfo{ { - Store: &pdapi.MetaStore{ - Store: &metapb.Store{ - Id: 333, - Address: fmt.Sprintf("%s.web", TestPodName), - }, + Store: pd.MetaStore{ + ID: 333, + Address: fmt.Sprintf("%s.web", TestPodName), }, }, }, @@ -298,7 
+294,7 @@ func TestPodControlUpdateMetaInfoUpdatePodFailed(t *testing.T) { return cluster, nil }) pdClient.AddReaction(pdapi.GetMembersActionType, func(action *pdapi.Action) (interface{}, error) { - membersInfo := &pdapi.MembersInfo{ + membersInfo := &pd.MembersInfo{ Members: []*pdpb.Member{ { MemberId: 111, @@ -308,14 +304,12 @@ func TestPodControlUpdateMetaInfoUpdatePodFailed(t *testing.T) { return membersInfo, nil }) pdClient.AddReaction(pdapi.GetStoresActionType, func(action *pdapi.Action) (interface{}, error) { - storesInfo := &pdapi.StoresInfo{ - Stores: []*pdapi.StoreInfo{ + storesInfo := &pd.StoresInfo{ + Stores: []pd.StoreInfo{ { - Store: &pdapi.MetaStore{ - Store: &metapb.Store{ - Id: 333, - Address: fmt.Sprintf("%s.web", TestPodName), - }, + Store: pd.MetaStore{ + ID: 333, + Address: fmt.Sprintf("%s.web", TestPodName), }, }, }, diff --git a/pkg/controller/tidb_control.go b/pkg/controller/tidb_control.go index c5e3e32e3ca..cd8869ce2dd 100644 --- a/pkg/controller/tidb_control.go +++ b/pkg/controller/tidb_control.go @@ -41,7 +41,7 @@ type DBInfo struct { type TiDBControlInterface interface { // GetHealth returns tidb's health info GetHealth(tc *v1alpha1.TidbCluster, ordinal int32) (bool, error) - // Get TIDB info return tidb's DBInfo + // GetInfo Get TIDB info return tidb's DBInfo GetInfo(tc *v1alpha1.TidbCluster, ordinal int32) (*DBInfo, error) // SetServerLabels update TiDB's labels config SetServerLabels(tc *v1alpha1.TidbCluster, ordinal int32, labels map[string]string) error diff --git a/pkg/controller/tidbcluster/pod_control.go b/pkg/controller/tidbcluster/pod_control.go index 36a44c09a69..ff6c2446e83 100644 --- a/pkg/controller/tidbcluster/pod_control.go +++ b/pkg/controller/tidbcluster/pod_control.go @@ -28,6 +28,7 @@ import ( "github.com/pingcap/tidb-operator/pkg/metrics" "github.com/pingcap/tidb-operator/pkg/pdapi" "github.com/pingcap/tidb-operator/pkg/third_party/k8s" + pd "github.com/tikv/pd/client/http" corev1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -54,7 +55,7 @@ type PodController struct { podStats map[string]stat // only set in test - testPDClient pdapi.PDClient + testPDClient pd.Client recheckLeaderCountDuration time.Duration recheckClusterStableDuration time.Duration recheckStoreTombstoneDuration time.Duration @@ -230,7 +231,7 @@ func (c *PodController) sync(key string) (reconcile.Result, error) { } } -func (c *PodController) getPDClient(tc *v1alpha1.TidbCluster) pdapi.PDClient { +func (c *PodController) getPDClient(tc *v1alpha1.TidbCluster) pd.Client { if c.testPDClient != nil { return c.testPDClient } @@ -282,7 +283,7 @@ func (c *PodController) syncPDPodForLeaderTransfer(ctx context.Context, pod *cor var err error pdName := getPdName(pod, tc) if tc.Status.PD.Leader.Name == pod.Name || tc.Status.PD.Leader.Name == pdName { - err = transferPDLeader(tc, pdClient) + err = transferPDLeader(ctx, tc, pdClient) if err != nil { return reconcile.Result{}, nil } @@ -317,11 +318,11 @@ func (c *PodController) syncPDPodForReplaceVolume(ctx context.Context, pod *core return reconcile.Result{}, fmt.Errorf("Could not parse memberID (%s) from label for pod %s/%s", memberIDStr, pod.Namespace, pod.Name) } pdClient := c.getPDClient(tc) - leader, err := pdClient.GetPDLeader() + leader, err := pdClient.GetPDLeader(ctx) if err != nil { return reconcile.Result{}, err } - membersInfo, err := pdClient.GetMembers() + membersInfo, err := pdClient.GetMembers(ctx) if err != nil { return reconcile.Result{}, err } @@ -340,7 +341,9 
@@ func (c *PodController) syncPDPodForReplaceVolume(ctx context.Context, pod *core return reconcile.Result{Requeue: true}, fmt.Errorf("not all PDs ready before leader transfer") } klog.Infof("Transferring PD Leader from %s to %s", pod.Name, targetMemberName) - pdClient.TransferPDLeader(targetMemberName) + if err := pdClient.TransferPDLeader(ctx, targetMemberName); err != nil { + return reconcile.Result{}, err + } // Wait for leader transfer. return reconcile.Result{Requeue: true}, nil } @@ -356,7 +359,7 @@ func (c *PodController) syncPDPodForReplaceVolume(ctx context.Context, pod *core return reconcile.Result{Requeue: true}, fmt.Errorf("not all PDs ready before delete member") } klog.Infof("Deleting PD Member ID: %d", memberID) - err = pdClient.DeleteMemberByID(memberID) + err = pdClient.DeleteMemberByID(ctx, memberID) if err != nil { return reconcile.Result{}, err } @@ -429,7 +432,7 @@ func (c *PodController) syncTiKVPodForEviction(ctx context.Context, pod *corev1. klog.Infof("Cluster %s is unstable: %s", tc.Name, unstableReason) return reconcile.Result{RequeueAfter: c.recheckClusterStableDuration}, nil } - err = pdClient.BeginEvictLeader(storeID) + err = pdapi.BeginEvictLeader(ctx, pdClient, storeID) if err != nil { return reconcile.Result{}, perrors.Annotatef(err, "failed to evict leader for store %d (Pod %s/%s)", storeID, pod.Namespace, pod.Name) } @@ -465,7 +468,7 @@ func (c *PodController) syncTiKVPodForEviction(ctx context.Context, pod *corev1. return perrors.Annotatef(err, "failed to get tikv store id from status for pod %s/%s", pod.Namespace, pod.Name) } - err = pdClient.EndEvictLeader(storeID) + err = pdapi.EndEvictLeader(ctx, pdClient, storeID) if err != nil { return perrors.Annotatef(err, "failed to remove evict leader scheduler for store %d, pod %s/%s", storeID, pod.Namespace, pod.Name) } @@ -547,7 +550,7 @@ func (c *PodController) syncTiKVPodForReplaceVolume(ctx context.Context, pod *co if pod.Labels[label.StoreIDLabelKey] == "" { return reconcile.Result{Requeue: true}, fmt.Errorf("StoreID not yet updated on pod label") } - storeInfo, err := pdClient.GetStore(storeID) + storeInfo, err := pdClient.GetStore(ctx, storeID) if err != nil { return reconcile.Result{}, perrors.Annotatef(err, "failed to get tikv store info from pd for storeid %d pod %s/%s", storeID, pod.Namespace, pod.Name) } @@ -557,7 +560,9 @@ func (c *PodController) syncTiKVPodForReplaceVolume(ctx context.Context, pod *co } // 1. Delete store klog.Infof("storeid %d is Up, deleting due to replace volume annotation.", storeID) - pdClient.DeleteStore(storeID) + if err := pdClient.DeleteStore(ctx, storeID); err != nil { + return reconcile.Result{}, perrors.Annotatef(err, "failed to delete store %d", storeID) + } return reconcile.Result{RequeueAfter: c.recheckStoreTombstoneDuration}, nil } else if storeInfo.Store.StateName == v1alpha1.TiKVStateOffline { // 2. 
Wait for Tombstone @@ -738,14 +743,14 @@ func needPDLeaderTransfer(pod *corev1.Pod) (string, bool) { return value, true } -func transferPDLeader(tc *v1alpha1.TidbCluster, pdClient pdapi.PDClient) error { +func transferPDLeader(ctx context.Context, tc *v1alpha1.TidbCluster, pdClient pd.Client) error { // find a target from peer members target := pickNewLeader(tc) if len(target) == 0 { return fmt.Errorf("can't find a target pd for leader transfer") } - return pdClient.TransferPDLeader(target) + return pdClient.TransferPDLeader(ctx, target) } func pickNewLeader(tc *v1alpha1.TidbCluster) string { diff --git a/pkg/controller/tidbcluster/pod_control_test.go b/pkg/controller/tidbcluster/pod_control_test.go index 3e603ad929a..11588d75bf6 100644 --- a/pkg/controller/tidbcluster/pod_control_test.go +++ b/pkg/controller/tidbcluster/pod_control_test.go @@ -21,18 +21,19 @@ import ( "testing" "time" - "github.com/pingcap/kvproto/pkg/pdpb" - "sigs.k8s.io/controller-runtime/pkg/reconcile" - . "github.com/onsi/gomega" + "github.com/pingcap/kvproto/pkg/pdpb" "github.com/pingcap/tidb-operator/pkg/apis/label" "github.com/pingcap/tidb-operator/pkg/apis/pingcap/v1alpha1" "github.com/pingcap/tidb-operator/pkg/controller" "github.com/pingcap/tidb-operator/pkg/pdapi" "github.com/pingcap/tidb-operator/pkg/tikvapi" + pd "github.com/tikv/pd/client/http" + corev1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "sigs.k8s.io/controller-runtime/pkg/reconcile" ) type kvClient struct { @@ -73,10 +74,10 @@ func TestTiKVPodSyncForEviction(t *testing.T) { var tikvStatus atomic.Value tikvStatus.Store(v1alpha1.TiKVStateDown) pdClient.AddReaction(pdapi.GetStoresActionType, func(action *pdapi.Action) (interface{}, error) { - storesInfo := &pdapi.StoresInfo{ - Stores: []*pdapi.StoreInfo{ + storesInfo := &pd.StoresInfo{ + Stores: []pd.StoreInfo{ { - Store: &pdapi.MetaStore{ + Store: pd.MetaStore{ StateName: tikvStatus.Load().(string), }, }, @@ -210,8 +211,8 @@ func TestTiKVPodSyncForReplaceVolume(t *testing.T) { pdClient := pdapi.NewFakePDClient() c.testPDClient = pdClient pdClient.AddReaction(pdapi.GetStoreActionType, func(action *pdapi.Action) (interface{}, error) { - storeInfo := &pdapi.StoreInfo{ - Store: &pdapi.MetaStore{ + storeInfo := &pd.StoreInfo{ + Store: pd.MetaStore{ StateName: tt.storeState, }, } @@ -452,7 +453,7 @@ func TestPDPodSyncForLeaderTransfer(t *testing.T) { }, Members: make(map[string]v1alpha1.PDMember), } - healths := make([]pdapi.MemberHealth, c.replicas) + healths := make([]pd.MemberHealth, c.replicas) for i := 0; i < c.replicas; i++ { member := fmt.Sprintf("%s-%d", controller.PDMemberName(tc.Name), i) tc.Status.PD.Members[member] = v1alpha1.PDMember{ @@ -465,13 +466,13 @@ func TestPDPodSyncForLeaderTransfer(t *testing.T) { health = false } } - healths[i] = pdapi.MemberHealth{ + healths[i] = pd.MemberHealth{ Name: member, Health: health, } } pdClient.AddReaction(pdapi.GetHealthActionType, func(action *pdapi.Action) (interface{}, error) { - return &pdapi.HealthInfo{Healths: healths}, nil + return &pd.HealthInfo{Healths: healths}, nil }) for _, i := range c.failed { member := fmt.Sprintf("%s-%d", controller.PDMemberName(tc.Name), i) @@ -764,7 +765,7 @@ func TestPdPodSyncForReplaceVolume(t *testing.T) { }) pdClient.AddReaction(pdapi.GetMembersActionType, func(action *pdapi.Action) (interface{}, error) { - return &pdapi.MembersInfo{ + return &pd.MembersInfo{ Members: []*pdpb.Member{ { MemberId: 123, diff --git a/pkg/discovery/discovery.go 
b/pkg/discovery/discovery.go index d32f5f286ae..f85d710b0a4 100644 --- a/pkg/discovery/discovery.go +++ b/pkg/discovery/discovery.go @@ -26,6 +26,7 @@ import ( "github.com/pingcap/tidb-operator/pkg/controller" "github.com/pingcap/tidb-operator/pkg/dmapi" "github.com/pingcap/tidb-operator/pkg/pdapi" + pd "github.com/tikv/pd/client/http" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/client-go/kubernetes" "k8s.io/klog/v2" @@ -124,7 +125,7 @@ func (d *tidbDiscovery) Discover(advertisePeerUrl string) (string, error) { return fmt.Sprintf("--initial-cluster=%s=%s://%s", podName, tc.Scheme(), advertisePeerUrl), nil } - var pdClients []pdapi.PDClient + var pdClients []pd.Client if tc.Spec.PD != nil { // connect to pd of current cluster @@ -150,9 +151,9 @@ func (d *tidbDiscovery) Discover(advertisePeerUrl string) (string, error) { pdClients = append(pdClients, d.pdControl.GetPDClient(pdapi.Namespace(ns), tc.Name, tc.IsTLSClusterEnabled(), pdapi.SpecifyClient(pdMember.ClientURL, pdMember.Name))) } - var membersInfo *pdapi.MembersInfo + var membersInfo *pd.MembersInfo for _, client := range pdClients { - membersInfo, err = client.GetMembers() + membersInfo, err = client.GetMembers(context.TODO()) if err == nil { break } diff --git a/pkg/discovery/discovery_test.go b/pkg/discovery/discovery_test.go index 239a037abd6..d1f1dbac89b 100644 --- a/pkg/discovery/discovery_test.go +++ b/pkg/discovery/discovery_test.go @@ -27,6 +27,8 @@ import ( "github.com/pingcap/tidb-operator/pkg/controller" "github.com/pingcap/tidb-operator/pkg/dmapi" "github.com/pingcap/tidb-operator/pkg/pdapi" + pd "github.com/tikv/pd/client/http" + apierrors "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" kubeinformers "k8s.io/client-go/informers" @@ -42,7 +44,7 @@ func TestDiscoveryDiscovery(t *testing.T) { url string clusters map[string]*clusterInfo tc *v1alpha1.TidbCluster - getMembersFn func() (*pdapi.MembersInfo, error) + getMembersFn func() (*pd.MembersInfo, error) expectFn func(*GomegaWithT, *tidbDiscovery, string, error) } testFn := func(test testcase, t *testing.T) { @@ -120,7 +122,7 @@ func TestDiscoveryDiscovery(t *testing.T) { url: "demo-pd-0.demo-pd-peer.default.svc:2380", clusters: map[string]*clusterInfo{}, tc: newTC(), - getMembersFn: func() (*pdapi.MembersInfo, error) { + getMembersFn: func() (*pd.MembersInfo, error) { return nil, fmt.Errorf("get members failed") }, expectFn: func(g *GomegaWithT, td *tidbDiscovery, s string, err error) { @@ -136,7 +138,7 @@ func TestDiscoveryDiscovery(t *testing.T) { ns: "default", url: "demo-pd-0.demo-pd-peer.default.svc:2380", tc: newTC(), - getMembersFn: func() (*pdapi.MembersInfo, error) { + getMembersFn: func() (*pd.MembersInfo, error) { return nil, fmt.Errorf("getMembers failed") }, clusters: map[string]*clusterInfo{ @@ -162,7 +164,7 @@ func TestDiscoveryDiscovery(t *testing.T) { url: "demo-pd-0.demo-pd-peer.default.svc:2380", clusters: map[string]*clusterInfo{}, tc: newTC(), - getMembersFn: func() (*pdapi.MembersInfo, error) { + getMembersFn: func() (*pd.MembersInfo, error) { return nil, fmt.Errorf("there are no pd members") }, expectFn: func(g *GomegaWithT, td *tidbDiscovery, s string, err error) { @@ -178,7 +180,7 @@ func TestDiscoveryDiscovery(t *testing.T) { ns: "default", url: "demo-pd-1.demo-pd-peer.default.svc:2380", tc: newTC(), - getMembersFn: func() (*pdapi.MembersInfo, error) { + getMembersFn: func() (*pd.MembersInfo, error) { return nil, fmt.Errorf("there are no pd members 2") }, clusters: map[string]*clusterInfo{ @@ -226,7 
+228,7 @@ func TestDiscoveryDiscovery(t *testing.T) { ns: "default", url: "demo-pd-0.demo-pd-peer.default.svc:2380", tc: newTC(), - getMembersFn: func() (*pdapi.MembersInfo, error) { + getMembersFn: func() (*pd.MembersInfo, error) { return nil, fmt.Errorf("there are no pd members 3") }, clusters: map[string]*clusterInfo{ @@ -252,8 +254,8 @@ func TestDiscoveryDiscovery(t *testing.T) { ns: "default", url: "demo-pd-0.demo-pd-peer.default.svc:2380", tc: newTC(), - getMembersFn: func() (*pdapi.MembersInfo, error) { - return &pdapi.MembersInfo{ + getMembersFn: func() (*pd.MembersInfo, error) { + return &pd.MembersInfo{ Members: []*pdpb.Member{ { PeerUrls: []string{"demo-pd-2.demo-pd-peer.default.svc:2380"}, @@ -283,8 +285,8 @@ func TestDiscoveryDiscovery(t *testing.T) { ns: "default", url: "demo-pd-1.demo-pd-peer.default.svc:2380", tc: newTC(), - getMembersFn: func() (*pdapi.MembersInfo, error) { - return &pdapi.MembersInfo{ + getMembersFn: func() (*pd.MembersInfo, error) { + return &pd.MembersInfo{ Members: []*pdpb.Member{ { PeerUrls: []string{"demo-pd-0.demo-pd-peer.default.svc:2380"}, @@ -319,8 +321,8 @@ func TestDiscoveryDiscovery(t *testing.T) { tc.Spec.PD.Replicas = 5 return tc }(), - getMembersFn: func() (*pdapi.MembersInfo, error) { - return &pdapi.MembersInfo{ + getMembersFn: func() (*pd.MembersInfo, error) { + return &pd.MembersInfo{ Members: []*pdpb.Member{ { PeerUrls: []string{"demo-pd-0.demo-pd-peer.default.svc:2380"}, @@ -352,8 +354,8 @@ func TestDiscoveryDiscovery(t *testing.T) { ns: "default", url: "demo-pd-0.demo-pd-peer.default.svc.cluster.local:2380", tc: newTC(), - getMembersFn: func() (*pdapi.MembersInfo, error) { - return &pdapi.MembersInfo{ + getMembersFn: func() (*pd.MembersInfo, error) { + return &pd.MembersInfo{ Members: []*pdpb.Member{ { PeerUrls: []string{"demo-pd-2.demo-pd-peer.default.svc.cluster.local:2380"}, @@ -387,8 +389,8 @@ func TestDiscoveryDiscovery(t *testing.T) { tc.Spec.PD.Replicas = 5 return tc }(), - getMembersFn: func() (*pdapi.MembersInfo, error) { - return &pdapi.MembersInfo{ + getMembersFn: func() (*pd.MembersInfo, error) { + return &pd.MembersInfo{ Members: []*pdpb.Member{ { PeerUrls: []string{"demo-pd-0.demo-pd-peer.default.svc:2380"}, @@ -463,8 +465,8 @@ func TestDiscoveryDiscovery(t *testing.T) { } return tc }(), - getMembersFn: func() (*pdapi.MembersInfo, error) { - return &pdapi.MembersInfo{ + getMembersFn: func() (*pd.MembersInfo, error) { + return &pd.MembersInfo{ Members: []*pdpb.Member{ { PeerUrls: []string{"http://address0:2380"}, @@ -509,8 +511,8 @@ func TestDiscoveryDiscovery(t *testing.T) { } return tc }(), - getMembersFn: func() (*pdapi.MembersInfo, error) { - return &pdapi.MembersInfo{ + getMembersFn: func() (*pd.MembersInfo, error) { + return &pd.MembersInfo{ Members: []*pdpb.Member{ { PeerUrls: []string{"demo-pd-3.demo-pd-peer.default.svc:2380"}, @@ -917,7 +919,7 @@ func TestDiscoveryVerifyPDEndpoint(t *testing.T) { } if test.inclusterPD { pdClientCluster1.AddReaction(pdapi.GetHealthActionType, func(action *pdapi.Action) (interface{}, error) { - return &pdapi.HealthInfo{Healths: []pdapi.MemberHealth{ + return &pd.HealthInfo{Healths: []pd.MemberHealth{ {Name: "pd-0", MemberID: uint64(1), ClientUrls: []string{"https://pd-0.pd.pingcap.cluster1.com:2379"}, Health: true}, }}, nil }) @@ -929,7 +931,7 @@ func TestDiscoveryVerifyPDEndpoint(t *testing.T) { if test.peerclusterPD { pdClientCluster2.AddReaction(pdapi.GetHealthActionType, func(action *pdapi.Action) (interface{}, error) { - return &pdapi.HealthInfo{Healths: []pdapi.MemberHealth{ 
+ return &pd.HealthInfo{Healths: []pd.MemberHealth{ {Name: "pd-0", MemberID: uint64(1), ClientUrls: []string{"https://pd-0.pd.pingcap.cluster2.com:2379"}, Health: true}, }}, nil }) @@ -950,7 +952,7 @@ func TestDiscoveryVerifyPDEndpoint(t *testing.T) { } if test.inclusterPD { pdClientCluster1.AddReaction(pdapi.GetHealthActionType, func(action *pdapi.Action) (interface{}, error) { - return &pdapi.HealthInfo{Healths: []pdapi.MemberHealth{ + return &pd.HealthInfo{Healths: []pd.MemberHealth{ {Name: "pd-0", MemberID: uint64(1), ClientUrls: []string{"http://pd-0.pd.pingcap.cluster1.com:2379"}, Health: true}, }}, nil }) @@ -962,7 +964,7 @@ func TestDiscoveryVerifyPDEndpoint(t *testing.T) { if test.peerclusterPD { pdClientCluster2.AddReaction(pdapi.GetHealthActionType, func(action *pdapi.Action) (interface{}, error) { - return &pdapi.HealthInfo{Healths: []pdapi.MemberHealth{ + return &pd.HealthInfo{Healths: []pd.MemberHealth{ {Name: "pd-0", MemberID: uint64(1), ClientUrls: []string{"http://pd-0.pd.pingcap.cluster2.com:2379"}, Health: true}, }}, nil }) diff --git a/pkg/discovery/server/server_test.go b/pkg/discovery/server/server_test.go index 93995c73cd1..e86ddaa3449 100644 --- a/pkg/discovery/server/server_test.go +++ b/pkg/discovery/server/server_test.go @@ -17,6 +17,7 @@ import ( "context" "encoding/base64" "fmt" + pd "github.com/tikv/pd/client/http" "io" "net/http" "net/http/httptest" @@ -79,7 +80,7 @@ func TestServer(t *testing.T) { defer httpServer.Close() var lock sync.RWMutex - pdMemberInfos := &pdapi.MembersInfo{ + pdMemberInfos := &pd.MembersInfo{ Members: []*pdpb.Member{}, } pdClient.AddReaction(pdapi.GetMembersActionType, func(action *pdapi.Action) (interface{}, error) { diff --git a/pkg/manager/member/common_store_failover.go b/pkg/manager/member/common_store_failover.go index 16cbc1762a4..0932cee8fe3 100644 --- a/pkg/manager/member/common_store_failover.go +++ b/pkg/manager/member/common_store_failover.go @@ -14,6 +14,7 @@ package member import ( + "context" "fmt" "strconv" "time" @@ -176,7 +177,7 @@ func (sf *commonStoreFailover) invokeDeleteFailureStore(tc *v1alpha1.TidbCluster return parseErr } pdCli := controller.GetPDClient(sf.deps.PDControl, tc) - if deleteErr := pdCli.DeleteStore(storeUintId); deleteErr != nil { + if deleteErr := pdCli.DeleteStore(context.TODO(), storeUintId); deleteErr != nil { return deleteErr } msg := fmt.Sprintf("Invoked delete on %s store '%s' in cluster %s/%s", sf.storeAccess.GetMemberType(), failureStore.StoreID, ns, tcName) diff --git a/pkg/manager/member/pd_failover.go b/pkg/manager/member/pd_failover.go index faefd97e8d1..c1df90b8cf3 100644 --- a/pkg/manager/member/pd_failover.go +++ b/pkg/manager/member/pd_failover.go @@ -14,6 +14,7 @@ package member import ( + "context" "fmt" "strconv" "strings" @@ -191,7 +192,7 @@ func (f *pdFailover) tryToDeleteAFailureMember(tc *v1alpha1.TidbCluster) error { return err } // invoke deleteMember api to delete a member from the pd cluster - if err := controller.GetPDClient(f.deps.PDControl, tc).DeleteMemberByID(memberID); err != nil { + if err := controller.GetPDClient(f.deps.PDControl, tc).DeleteMemberByID(context.TODO(), memberID); err != nil { klog.Errorf("pd failover[tryToDeleteAFailureMember]: failed to delete member %s/%s(%d), error: %v", ns, failurePodName, memberID, err) return err } diff --git a/pkg/manager/member/pd_member_manager.go b/pkg/manager/member/pd_member_manager.go index cb86c01025d..2003c14a55e 100644 --- a/pkg/manager/member/pd_member_manager.go +++ b/pkg/manager/member/pd_member_manager.go @@ 
-14,6 +14,7 @@ package member import ( + "context" "fmt" "path" "regexp" @@ -335,7 +336,8 @@ func (m *pdMemberManager) syncTidbClusterStatus(tc *v1alpha1.TidbCluster, set *a pdClient := controller.GetPDClient(m.deps.PDControl, tc) - healthInfo, err := pdClient.GetHealth() + ctx := context.TODO() + healthInfo, err := pdClient.GetHealth(ctx) if err != nil { tc.Status.PD.Synced = false // get endpoints info @@ -350,13 +352,13 @@ func (m *pdMemberManager) syncTidbClusterStatus(tc *v1alpha1.TidbCluster, set *a return err } - cluster, err := pdClient.GetCluster() + cluster, err := pdClient.GetCluster(ctx) if err != nil { tc.Status.PD.Synced = false return err } tc.Status.ClusterID = strconv.FormatUint(cluster.Id, 10) - leader, err := pdClient.GetPDLeader() + leader, err := pdClient.GetPDLeader(ctx) if err != nil { tc.Status.PD.Synced = false return err diff --git a/pkg/manager/member/pd_member_manager_test.go b/pkg/manager/member/pd_member_manager_test.go index 06c74e68877..10bb0e83f0b 100644 --- a/pkg/manager/member/pd_member_manager_test.go +++ b/pkg/manager/member/pd_member_manager_test.go @@ -19,8 +19,6 @@ import ( "strings" "testing" - "github.com/google/go-cmp/cmp" - . "github.com/onsi/gomega" apps "k8s.io/api/apps/v1" corev1 "k8s.io/api/core/v1" v1 "k8s.io/api/core/v1" @@ -32,6 +30,8 @@ import ( "k8s.io/client-go/tools/cache" "k8s.io/utils/pointer" + "github.com/google/go-cmp/cmp" + . "github.com/onsi/gomega" "github.com/pingcap/kvproto/pkg/metapb" "github.com/pingcap/tidb-operator/pkg/apis/label" "github.com/pingcap/tidb-operator/pkg/apis/pingcap/v1alpha1" @@ -40,6 +40,7 @@ import ( "github.com/pingcap/tidb-operator/pkg/manager/suspender" "github.com/pingcap/tidb-operator/pkg/manager/volumes" "github.com/pingcap/tidb-operator/pkg/pdapi" + pd "github.com/tikv/pd/client/http" ) func TestPDMemberManagerSyncCreate(t *testing.T) { @@ -235,7 +236,7 @@ func TestPDMemberManagerSyncUpdate(t *testing.T) { type testcase struct { name string modify func(cluster *v1alpha1.TidbCluster) - pdHealth *pdapi.HealthInfo + pdHealth *pd.HealthInfo errWhenUpdateStatefulSet bool errWhenUpdatePDService bool errWhenUpdatePDPeerService bool @@ -353,7 +354,7 @@ func TestPDMemberManagerSyncUpdate(t *testing.T) { {Name: "pd", Type: string(corev1.ServiceTypeNodePort)}, } }, - pdHealth: &pdapi.HealthInfo{Healths: []pdapi.MemberHealth{ + pdHealth: &pd.HealthInfo{Healths: []pd.MemberHealth{ {Name: "pd1", MemberID: uint64(1), ClientUrls: []string{"http://test-pd-1.test-pd-peer.default.svc:2379"}, Health: true}, {Name: "pd2", MemberID: uint64(2), ClientUrls: []string{"http://test-pd-2.test-pd-peer.default.svc:2379"}, Health: true}, {Name: "pd3", MemberID: uint64(3), ClientUrls: []string{"http://test-pd-3.test-pd-peer.default.svc:2379"}, Health: false}, @@ -389,7 +390,7 @@ func TestPDMemberManagerSyncUpdate(t *testing.T) { {Name: "pd", Type: string(corev1.ServiceTypeNodePort)}, } }, - pdHealth: &pdapi.HealthInfo{Healths: []pdapi.MemberHealth{ + pdHealth: &pd.HealthInfo{Healths: []pd.MemberHealth{ {Name: "pd1", MemberID: uint64(1), ClientUrls: []string{"http://pd1:2379"}, Health: true}, {Name: "pd2", MemberID: uint64(2), ClientUrls: []string{"http://pd2:2379"}, Health: true}, {Name: "pd3", MemberID: uint64(3), ClientUrls: []string{"http://pd3:2379"}, Health: false}, @@ -407,7 +408,7 @@ func TestPDMemberManagerSyncUpdate(t *testing.T) { modify: func(tc *v1alpha1.TidbCluster) { tc.Spec.PD.Replicas = 5 }, - pdHealth: &pdapi.HealthInfo{Healths: []pdapi.MemberHealth{ + pdHealth: &pd.HealthInfo{Healths: []pd.MemberHealth{ {Name: 
"pd1", MemberID: uint64(1), ClientUrls: []string{"http://pd1:2379"}, Health: true}, {Name: "pd2", MemberID: uint64(2), ClientUrls: []string{"http://pd2:2379"}, Health: true}, {Name: "pd3", MemberID: uint64(3), ClientUrls: []string{"http://pd3:2379"}, Health: false}, @@ -473,7 +474,7 @@ func TestPDMemberManagerSyncUpdate(t *testing.T) { }}}, } }, - pdHealth: &pdapi.HealthInfo{Healths: []pdapi.MemberHealth{ + pdHealth: &pd.HealthInfo{Healths: []pd.MemberHealth{ {Name: "pd1", MemberID: uint64(1), ClientUrls: []string{"http://test-pd-1.test-pd-peer.default.svc:2379"}, Health: true}, {Name: "pd2", MemberID: uint64(2), ClientUrls: []string{"http://test-pd-2.test-pd-peer.default.svc:2379"}, Health: true}, {Name: "pd3", MemberID: uint64(3), ClientUrls: []string{"http://test-pd-3.test-pd-peer.default.svc:2379"}, Health: false}, @@ -505,7 +506,7 @@ func TestPDMemberManagerSyncUpdate(t *testing.T) { {Name: "additional", Image: "test"}, } }, - pdHealth: &pdapi.HealthInfo{Healths: []pdapi.MemberHealth{ + pdHealth: &pd.HealthInfo{Healths: []pd.MemberHealth{ {Name: "pd1", MemberID: uint64(1), ClientUrls: []string{"http://test-pd-1.test-pd-peer.default.svc:2379"}, Health: true}, {Name: "pd2", MemberID: uint64(2), ClientUrls: []string{"http://test-pd-2.test-pd-peer.default.svc:2379"}, Health: true}, {Name: "pd3", MemberID: uint64(3), ClientUrls: []string{"http://test-pd-3.test-pd-peer.default.svc:2379"}, Health: false}, @@ -641,7 +642,7 @@ func TestPDMemberManagerUpgrade(t *testing.T) { type testcase struct { name string modify func(cluster *v1alpha1.TidbCluster) - pdHealth *pdapi.HealthInfo + pdHealth *pd.HealthInfo err bool statusChange func(*apps.StatefulSet) expectStatefulSetFn func(*GomegaWithT, *apps.StatefulSet, error) @@ -701,7 +702,7 @@ func TestPDMemberManagerUpgrade(t *testing.T) { modify: func(cluster *v1alpha1.TidbCluster) { cluster.Spec.PD.Image = "pd-test-image:v2" }, - pdHealth: &pdapi.HealthInfo{Healths: []pdapi.MemberHealth{ + pdHealth: &pd.HealthInfo{Healths: []pd.MemberHealth{ {Name: "pd1", MemberID: uint64(1), ClientUrls: []string{"http://test-pd-1.test-pd-peer.default.svc:2379"}, Health: true}, {Name: "pd2", MemberID: uint64(2), ClientUrls: []string{"http://test-pd-2.test-pd-peer.default.svc:2379"}, Health: true}, {Name: "pd3", MemberID: uint64(3), ClientUrls: []string{"http://test-pd-3.test-pd-peer.default.svc:2379"}, Health: false}, @@ -740,7 +741,7 @@ func TestPDMemberManagerSyncPDSts(t *testing.T) { name string preModify func(cluster *v1alpha1.TidbCluster) modify func(cluster *v1alpha1.TidbCluster) - pdHealth *pdapi.HealthInfo + pdHealth *pd.HealthInfo err bool statusChange func(*apps.StatefulSet) expectStatefulSetFn func(*GomegaWithT, *apps.StatefulSet, error) @@ -807,7 +808,7 @@ func TestPDMemberManagerSyncPDSts(t *testing.T) { cluster.ObjectMeta.Annotations = make(map[string]string) cluster.ObjectMeta.Annotations["tidb.pingcap.com/force-upgrade"] = "true" }, - pdHealth: &pdapi.HealthInfo{Healths: []pdapi.MemberHealth{ + pdHealth: &pd.HealthInfo{Healths: []pd.MemberHealth{ {Name: "pd1", MemberID: uint64(1), ClientUrls: []string{"http://pd1:2379"}, Health: false}, {Name: "pd2", MemberID: uint64(2), ClientUrls: []string{"http://pd2:2379"}, Health: false}, {Name: "pd3", MemberID: uint64(3), ClientUrls: []string{"http://pd3:2379"}, Health: false}, @@ -839,7 +840,7 @@ func TestPDMemberManagerSyncPDSts(t *testing.T) { modify: func(cluster *v1alpha1.TidbCluster) { cluster.Spec.PD.Image = "pd-test-image:v2" }, - pdHealth: &pdapi.HealthInfo{Healths: []pdapi.MemberHealth{ + pdHealth: 
&pd.HealthInfo{Healths: []pd.MemberHealth{ {Name: "pd1", MemberID: uint64(1), ClientUrls: []string{"http://pd1:2379"}, Health: false}, }}, err: true, @@ -869,7 +870,7 @@ func TestPDMemberManagerSyncPDSts(t *testing.T) { modify: func(cluster *v1alpha1.TidbCluster) { cluster.Spec.PD.Image = "pd-test-image:v2" }, - pdHealth: &pdapi.HealthInfo{Healths: []pdapi.MemberHealth{ + pdHealth: &pd.HealthInfo{Healths: []pd.MemberHealth{ {Name: "pd1", MemberID: uint64(1), ClientUrls: []string{"http://pd1:2379"}, Health: false}, }}, err: true, @@ -896,7 +897,7 @@ func TestPDMemberManagerSyncPDSts(t *testing.T) { cluster.Spec.PD.Image = "pd-test-image:v2" cluster.Spec.PD.Replicas = 1 }, - pdHealth: &pdapi.HealthInfo{Healths: []pdapi.MemberHealth{ + pdHealth: &pd.HealthInfo{Healths: []pd.MemberHealth{ {Name: "pd1", MemberID: uint64(1), ClientUrls: []string{"http://pd1:2379"}, Health: false}, {Name: "pd2", MemberID: uint64(2), ClientUrls: []string{"http://pd2:2379"}, Health: false}, {Name: "pd3", MemberID: uint64(3), ClientUrls: []string{"http://pd3:2379"}, Health: false}, @@ -2693,7 +2694,7 @@ func TestPDMemberManagerSyncPDStsWhenPdNotJoinCluster(t *testing.T) { type testcase struct { name string modify func(cluster *v1alpha1.TidbCluster, podIndexer cache.Indexer, pvcIndexer cache.Indexer) - pdHealth *pdapi.HealthInfo + pdHealth *pd.HealthInfo tcStatusChange func(cluster *v1alpha1.TidbCluster) err bool expectTidbClusterFn func(*GomegaWithT, *v1alpha1.TidbCluster) @@ -2791,7 +2792,7 @@ func TestPDMemberManagerSyncPDStsWhenPdNotJoinCluster(t *testing.T) { pvcIndexer.Add(pvc2) } }, - pdHealth: &pdapi.HealthInfo{Healths: []pdapi.MemberHealth{ + pdHealth: &pd.HealthInfo{Healths: []pd.MemberHealth{ {Name: "test-pd-0", MemberID: uint64(1), ClientUrls: []string{"http://test-pd-0:2379"}, Health: false}, {Name: "test-pd-1", MemberID: uint64(2), ClientUrls: []string{"http://test-pd-1:2379"}, Health: false}, }}, @@ -2835,7 +2836,7 @@ func TestPDMemberManagerSyncPDStsWhenPdNotJoinCluster(t *testing.T) { } }, - pdHealth: &pdapi.HealthInfo{Healths: []pdapi.MemberHealth{ + pdHealth: &pd.HealthInfo{Healths: []pd.MemberHealth{ {Name: "test-pd-0", MemberID: uint64(1), ClientUrls: []string{"http://test-pd-0.test-pd-peer.default.svc:2379"}, Health: false}, {Name: "test-pd-1", MemberID: uint64(2), ClientUrls: []string{"http://test-pd-1.test-pd-peer.default.svc:2379"}, Health: false}, {Name: "test-pd-2", MemberID: uint64(2), ClientUrls: []string{"http://test-pd-2.test-pd-peer.default.svc:2379"}, Health: false}, diff --git a/pkg/manager/member/pd_scaler.go b/pkg/manager/member/pd_scaler.go index 8d10e5ce8fa..cc09fc9fdd0 100644 --- a/pkg/manager/member/pd_scaler.go +++ b/pkg/manager/member/pd_scaler.go @@ -14,6 +14,7 @@ package member import ( + "context" "fmt" "github.com/pingcap/advanced-statefulset/client/apis/apps/v1/helper" @@ -100,7 +101,8 @@ func (s *pdScaler) ScaleIn(meta metav1.Object, oldSet *apps.StatefulSet, newSet } pdClient := controller.GetPDClient(s.deps.PDControl, tc) - leader, err := pdClient.GetPDLeader() + ctx := context.TODO() + leader, err := pdClient.GetPDLeader(ctx) if err != nil { return err } @@ -116,9 +118,9 @@ func (s *pdScaler) ScaleIn(meta metav1.Object, oldSet *apps.StatefulSet, newSet } targetPdName := PdName(tcName, targetOrdinal, tc.Namespace, tc.Spec.ClusterDomain, tc.Spec.AcrossK8s) if _, exist := tc.Status.PD.Members[targetPdName]; exist { - err = pdClient.TransferPDLeader(targetPdName) + err = pdClient.TransferPDLeader(ctx, targetPdName) } else { - err = 
pdClient.TransferPDLeader(PdPodName(tcName, targetOrdinal)) + err = pdClient.TransferPDLeader(ctx, PdPodName(tcName, targetOrdinal)) } if err != nil { return err @@ -126,7 +128,7 @@ func (s *pdScaler) ScaleIn(meta metav1.Object, oldSet *apps.StatefulSet, newSet } else { for _, member := range tc.Status.PD.PeerMembers { if member.Health && member.Name != memberName { - err = pdClient.TransferPDLeader(member.Name) + err = pdClient.TransferPDLeader(ctx, member.Name) if err != nil { return err } @@ -136,7 +138,7 @@ func (s *pdScaler) ScaleIn(meta metav1.Object, oldSet *apps.StatefulSet, newSet } } - err = pdClient.DeleteMember(memberName) + err = pdClient.DeleteMember(ctx, memberName) if err != nil { klog.Errorf("pdScaler.ScaleIn: failed to delete member %s, %v", memberName, err) return err diff --git a/pkg/manager/member/pd_upgrader.go b/pkg/manager/member/pd_upgrader.go index c9ff94d765f..9b070d37603 100644 --- a/pkg/manager/member/pd_upgrader.go +++ b/pkg/manager/member/pd_upgrader.go @@ -14,6 +14,7 @@ package member import ( + "context" "fmt" "github.com/pingcap/tidb-operator/pkg/apis/pingcap/v1alpha1" @@ -158,7 +159,8 @@ func (u *pdUpgrader) upgradePDPod(tc *v1alpha1.TidbCluster, ordinal int32, newSe } func (u *pdUpgrader) transferPDLeaderTo(tc *v1alpha1.TidbCluster, targetName string) error { - return controller.GetPDClient(u.deps.PDControl, tc).TransferPDLeader(targetName) + ctx := context.TODO() + return controller.GetPDClient(u.deps.PDControl, tc).TransferPDLeader(ctx, targetName) } // choosePDToTransferFromMembers choose a pd to transfer leader from members diff --git a/pkg/manager/member/pd_upgrader_test.go b/pkg/manager/member/pd_upgrader_test.go index 5eb2008d0c5..e410352ce8e 100644 --- a/pkg/manager/member/pd_upgrader_test.go +++ b/pkg/manager/member/pd_upgrader_test.go @@ -22,6 +22,7 @@ import ( "github.com/pingcap/tidb-operator/pkg/controller" mngerutils "github.com/pingcap/tidb-operator/pkg/manager/utils" "github.com/pingcap/tidb-operator/pkg/pdapi" + pd "github.com/tikv/pd/client/http" . 
"github.com/onsi/gomega" apps "k8s.io/api/apps/v1" @@ -84,8 +85,8 @@ func TestPDUpgraderUpgrade(t *testing.T) { newSet.Spec.UpdateStrategy.RollingUpdate.Partition = pointer.Int32Ptr(3) pdClient.AddReaction(pdapi.GetHealthActionType, func(action *pdapi.Action) (interface{}, error) { - healthInfo := &pdapi.HealthInfo{ - Healths: []pdapi.MemberHealth{ + healthInfo := &pd.HealthInfo{ + Healths: []pd.MemberHealth{ { Name: PdPodName(upgradeTcName, 1), Health: !test.pdPeersAreUnstable, diff --git a/pkg/manager/member/tidb_member_manager.go b/pkg/manager/member/tidb_member_manager.go index 565a68d44bd..5d5be3db0d8 100644 --- a/pkg/manager/member/tidb_member_manager.go +++ b/pkg/manager/member/tidb_member_manager.go @@ -1173,7 +1173,8 @@ func (m *tidbMemberManager) setServerLabels(tc *v1alpha1.TidbCluster) (int, erro setCount := 0 pdCli := controller.GetPDClient(m.deps.PDControl, tc) - config, err := pdCli.GetConfig() + ctx := context.TODO() + config, err := pdCli.GetConfig(ctx) if err != nil { return setCount, err } diff --git a/pkg/manager/member/tidb_member_manager_test.go b/pkg/manager/member/tidb_member_manager_test.go index fd687f3cb4d..87aff3553f1 100644 --- a/pkg/manager/member/tidb_member_manager_test.go +++ b/pkg/manager/member/tidb_member_manager_test.go @@ -33,6 +33,7 @@ import ( "github.com/pingcap/tidb-operator/pkg/manager/volumes" "github.com/pingcap/tidb-operator/pkg/pdapi" "github.com/pingcap/tidb-operator/pkg/util" + pd "github.com/tikv/pd/client/http" apps "k8s.io/api/apps/v1" corev1 "k8s.io/api/core/v1" v1 "k8s.io/api/core/v1" @@ -180,8 +181,8 @@ func TestTiDBMemberManagerSyncUpdate(t *testing.T) { pdControl := tmm.deps.PDControl.(*pdapi.FakePDControl) pdClient := controller.NewFakePDClient(pdControl, tc) pdClient.AddReaction(pdapi.GetConfigActionType, func(action *pdapi.Action) (interface{}, error) { - return &pdapi.PDConfigFromAPI{ - Replication: &pdapi.PDReplicationConfig{}, + return &pd.ServerConfig{ + Replication: pd.ReplicationConfig{}, }, nil }) @@ -2203,8 +2204,8 @@ func TestTiDBMemberManagerScaleToZeroReplica(t *testing.T) { pdControl := tmm.deps.PDControl.(*pdapi.FakePDControl) pdClient := controller.NewFakePDClient(pdControl, tc) pdClient.AddReaction(pdapi.GetConfigActionType, func(action *pdapi.Action) (interface{}, error) { - return &pdapi.PDConfigFromAPI{ - Replication: &pdapi.PDReplicationConfig{}, + return &pd.ServerConfig{ + Replication: pd.ReplicationConfig{}, }, nil }) @@ -2691,8 +2692,8 @@ func TestTiDBMemberManagerSetServerLabels(t *testing.T) { if len(labels) == 0 { labels = []string{"topology.kubernetes.io/zone", corev1.LabelHostname} } - return &pdapi.PDConfigFromAPI{ - Replication: &pdapi.PDReplicationConfig{ + return &pd.ServerConfig{ + Replication: pd.ReplicationConfig{ LocationLabels: labels, }, }, nil diff --git a/pkg/manager/member/tiflash_member_manager.go b/pkg/manager/member/tiflash_member_manager.go index c343a78ec01..2fa9771b7f5 100644 --- a/pkg/manager/member/tiflash_member_manager.go +++ b/pkg/manager/member/tiflash_member_manager.go @@ -14,11 +14,13 @@ package member import ( + "context" "fmt" "reflect" "regexp" "strings" + "github.com/pingcap/kvproto/pkg/metapb" "github.com/pingcap/tidb-operator/pkg/apis/label" "github.com/pingcap/tidb-operator/pkg/apis/pingcap/v1alpha1" "github.com/pingcap/tidb-operator/pkg/controller" @@ -29,8 +31,8 @@ import ( "github.com/pingcap/tidb-operator/pkg/manager/volumes" "github.com/pingcap/tidb-operator/pkg/pdapi" "github.com/pingcap/tidb-operator/pkg/util" + pd "github.com/tikv/pd/client/http" - 
"github.com/pingcap/kvproto/pkg/metapb" apps "k8s.io/api/apps/v1" corev1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/api/errors" @@ -135,7 +137,7 @@ func (m *tiflashMemberManager) syncRecoveryForTiFlash(tc *v1alpha1.TidbCluster) // pd recovering mark indicates the pd allcate id had been set properly. func (m *tiflashMemberManager) checkRecoveringMark(tc *v1alpha1.TidbCluster) (bool, error) { pdCli := controller.GetPDClient(m.deps.PDControl, tc) - mark, err := pdCli.GetRecoveringMark() + mark, err := pdCli.GetRecoveringMark(context.TODO()) if err != nil { return false, err } @@ -145,17 +147,17 @@ func (m *tiflashMemberManager) checkRecoveringMark(tc *v1alpha1.TidbCluster) (bo func (m *tiflashMemberManager) enablePlacementRules(tc *v1alpha1.TidbCluster) error { pdCli := controller.GetPDClient(m.deps.PDControl, tc) - config, err := pdCli.GetConfig() + ctx := context.TODO() + config, err := pdCli.GetConfig(ctx) if err != nil { return err } - if config.Replication.EnablePlacementRules != nil && (!*config.Replication.EnablePlacementRules) { - klog.Infof("Cluster %s/%s enable-placement-rules is %v, set it to true", tc.Namespace, tc.Name, *config.Replication.EnablePlacementRules) - enable := true - rep := pdapi.PDReplicationConfig{ - EnablePlacementRules: &enable, + if !config.Replication.EnablePlacementRules { + klog.Infof("Cluster %s/%s enable-placement-rules is %v, set it to true", tc.Namespace, tc.Name, config.Replication.EnablePlacementRules) + rep := pd.ReplicationConfig{ + EnablePlacementRules: true, } - return pdCli.UpdateReplicationConfig(rep) + return pdCli.UpdateReplicationConfig(ctx, rep) } return nil } @@ -741,8 +743,9 @@ func (m *tiflashMemberManager) syncTidbClusterStatus(tc *v1alpha1.TidbCluster, s tombstoneStores := map[string]v1alpha1.TiKVStore{} pdCli := controller.GetPDClient(m.deps.PDControl, tc) + ctx := context.TODO() // This only returns Up/Down/Offline stores - storesInfo, err := pdCli.GetStores() + storesInfo, err := pdCli.GetStores(ctx) if err != nil { tc.Status.TiFlash.Synced = false klog.Warningf("Fail to GetStores for TidbCluster %s/%s: %s", tc.Namespace, tc.Name, err) @@ -769,24 +772,23 @@ func (m *tiflashMemberManager) syncTidbClusterStatus(tc *v1alpha1.TidbCluster, s status.LastTransitionTime = oldStore.LastTransitionTime } - if store.Store != nil { - if pattern.Match([]byte(store.Store.Address)) { - stores[status.ID] = *status - } else if util.MatchLabelFromStoreLabels(store.Store.Labels, label.TiFlashLabelVal) { - peerStores[status.ID] = *status - } + if pattern.Match([]byte(store.Store.Address)) { + stores[status.ID] = *status + } else if util.MatchLabelFromStoreLabels(store.Store.Labels, label.TiFlashLabelVal) { + peerStores[status.ID] = *status } } // this returns all tombstone stores - tombstoneStoresInfo, err := pdCli.GetTombStoneStores() + tombstoneStoresInfo, err := pdCli.GetStoresByState(ctx, metapb.StoreState_Tombstone) + pdCli.GetStores(ctx) if err != nil { tc.Status.TiFlash.Synced = false klog.Warningf("Fail to GetTombStoneStores for TidbCluster %s/%s", tc.Namespace, tc.Name) return err } for _, store := range tombstoneStoresInfo.Stores { - if store.Store != nil && !pattern.Match([]byte(store.Store.Address)) { + if !pattern.Match([]byte(store.Store.Address)) { continue } status := m.getTiFlashStore(store) @@ -820,12 +822,9 @@ func (m *tiflashMemberManager) syncTidbClusterStatus(tc *v1alpha1.TidbCluster, s return nil } -func (m *tiflashMemberManager) getTiFlashStore(store *pdapi.StoreInfo) *v1alpha1.TiKVStore { - if store.Store == nil || store.Status 
== nil { - return nil - } - storeID := fmt.Sprintf("%d", store.Store.GetId()) - ip := strings.Split(store.Store.GetAddress(), ":")[0] +func (m *tiflashMemberManager) getTiFlashStore(store pd.StoreInfo) *v1alpha1.TiKVStore { + storeID := fmt.Sprintf("%d", store.Store.ID) + ip := strings.Split(store.Store.Address, ":")[0] podName := strings.Split(ip, ".")[0] return &v1alpha1.TiKVStore{ @@ -848,12 +847,13 @@ func (m *tiflashMemberManager) setStoreLabelsForTiFlash(tc *v1alpha1.TidbCluster setCount := 0 pdCli := controller.GetPDClient(m.deps.PDControl, tc) - storesInfo, err := pdCli.GetStores() + ctx := context.TODO() + storesInfo, err := pdCli.GetStores(ctx) if err != nil { return setCount, err } - config, err := pdCli.GetConfig() + config, err := pdCli.GetConfig(ctx) if err != nil { return setCount, err } @@ -870,7 +870,7 @@ func (m *tiflashMemberManager) setStoreLabelsForTiFlash(tc *v1alpha1.TidbCluster for _, store := range storesInfo.Stores { // In theory, the external tiflash can join the cluster, and the operator would only manage the internal tiflash. // So we check the store owner to make sure it. - if store.Store != nil && !pattern.Match([]byte(store.Store.Address)) { + if !pattern.Match([]byte(store.Store.Address)) { continue } status := m.getTiFlashStore(store) @@ -892,7 +892,7 @@ func (m *tiflashMemberManager) setStoreLabelsForTiFlash(tc *v1alpha1.TidbCluster } if !m.storeLabelsEqualNodeLabels(store.Store.Labels, ls) { - set, err := pdCli.SetStoreLabels(store.Store.Id, ls) + set, err := pdCli.SetStoreLabels(context.TODO(), uint64(store.Store.ID), ls) if err != nil { klog.Warningf("failed to set pod: [%s/%s]'s store labels: %v", ns, podName, ls) continue @@ -909,12 +909,12 @@ func (m *tiflashMemberManager) setStoreLabelsForTiFlash(tc *v1alpha1.TidbCluster // storeLabelsEqualNodeLabels compares store labels with node labels // for historic reasons, PD stores TiFlash labels as []*StoreLabel which is a key-value pair slice -func (m *tiflashMemberManager) storeLabelsEqualNodeLabels(storeLabels []*metapb.StoreLabel, nodeLabels map[string]string) bool { +func (m *tiflashMemberManager) storeLabelsEqualNodeLabels(storeLabels []pd.StoreLabel, nodeLabels map[string]string) bool { ls := map[string]string{} for _, label := range storeLabels { - key := label.GetKey() + key := label.Key if _, ok := nodeLabels[key]; ok { - val := label.GetValue() + val := label.Value ls[key] = val } } diff --git a/pkg/manager/member/tiflash_member_manager_test.go b/pkg/manager/member/tiflash_member_manager_test.go index f0391bd1951..660baa8b7dd 100644 --- a/pkg/manager/member/tiflash_member_manager_test.go +++ b/pkg/manager/member/tiflash_member_manager_test.go @@ -21,13 +21,14 @@ import ( "github.com/google/go-cmp/cmp" . 
"github.com/onsi/gomega" - "github.com/pingcap/kvproto/pkg/metapb" "github.com/pingcap/tidb-operator/pkg/apis/label" "github.com/pingcap/tidb-operator/pkg/apis/pingcap/v1alpha1" "github.com/pingcap/tidb-operator/pkg/controller" "github.com/pingcap/tidb-operator/pkg/manager/suspender" "github.com/pingcap/tidb-operator/pkg/manager/volumes" "github.com/pingcap/tidb-operator/pkg/pdapi" + pd "github.com/tikv/pd/client/http" + apps "k8s.io/api/apps/v1" corev1 "k8s.io/api/core/v1" v1 "k8s.io/api/core/v1" @@ -288,7 +289,7 @@ func TestTiFlashMemberManagerSetStoreLabelsForTiFlash(t *testing.T) { errWhenGetStores bool hasNode bool hasPod bool - storeInfo *pdapi.StoresInfo + storeInfo *pd.StoresInfo errExpectFn func(*GomegaWithT, error) setCount int labelSetFailed bool @@ -297,8 +298,8 @@ func TestTiFlashMemberManagerSetStoreLabelsForTiFlash(t *testing.T) { tc := newTidbClusterForPD() pmm, _, _, pdClient, podIndexer, nodeIndexer := newFakeTiFlashMemberManager(tc) pdClient.AddReaction(pdapi.GetConfigActionType, func(action *pdapi.Action) (interface{}, error) { - return &pdapi.PDConfigFromAPI{ - Replication: &pdapi.PDReplicationConfig{ + return &pd.ServerConfig{ + Replication: pd.ReplicationConfig{ LocationLabels: []string{"region", "zone", "rack", "host"}, }, }, nil @@ -372,11 +373,8 @@ func TestTiFlashMemberManagerSetStoreLabelsForTiFlash(t *testing.T) { { name: "stores is empty", errWhenGetStores: false, - storeInfo: &pdapi.StoresInfo{ - Stores: []*pdapi.StoreInfo{}, - }, - hasNode: true, - hasPod: true, + hasNode: true, + hasPod: true, errExpectFn: func(g *GomegaWithT, err error) { g.Expect(err).NotTo(HaveOccurred()) }, @@ -386,15 +384,8 @@ func TestTiFlashMemberManagerSetStoreLabelsForTiFlash(t *testing.T) { { name: "status is nil", errWhenGetStores: false, - storeInfo: &pdapi.StoresInfo{ - Stores: []*pdapi.StoreInfo{ - { - Status: nil, - }, - }, - }, - hasNode: true, - hasPod: true, + hasNode: true, + hasPod: true, errExpectFn: func(g *GomegaWithT, err error) { g.Expect(err).NotTo(HaveOccurred()) }, @@ -404,15 +395,8 @@ func TestTiFlashMemberManagerSetStoreLabelsForTiFlash(t *testing.T) { { name: "store is nil", errWhenGetStores: false, - storeInfo: &pdapi.StoresInfo{ - Stores: []*pdapi.StoreInfo{ - { - Store: nil, - }, - }, - }, - hasNode: true, - hasPod: true, + hasNode: true, + hasPod: true, errExpectFn: func(g *GomegaWithT, err error) { g.Expect(err).NotTo(HaveOccurred()) }, @@ -422,17 +406,15 @@ func TestTiFlashMemberManagerSetStoreLabelsForTiFlash(t *testing.T) { { name: "don't have pod", errWhenGetStores: false, - storeInfo: &pdapi.StoresInfo{ - Stores: []*pdapi.StoreInfo{ + storeInfo: &pd.StoresInfo{ + Stores: []pd.StoreInfo{ { - Store: &pdapi.MetaStore{ - Store: &metapb.Store{ - Id: 333, - Address: fmt.Sprintf("%s-tiflash-1.%s-tiflash-peer.%s.svc:20160", "test", "test", "default"), - }, + Store: pd.MetaStore{ + ID: 333, + Address: fmt.Sprintf("%s-tiflash-1.%s-tiflash-peer.%s.svc:20160", "test", "test", "default"), StateName: "Up", }, - Status: &pdapi.StoreStatus{ + Status: pd.StoreStatus{ LeaderCount: 1, LastHeartbeatTS: time.Now(), }, @@ -451,17 +433,15 @@ func TestTiFlashMemberManagerSetStoreLabelsForTiFlash(t *testing.T) { { name: "don't have node", errWhenGetStores: false, - storeInfo: &pdapi.StoresInfo{ - Stores: []*pdapi.StoreInfo{ + storeInfo: &pd.StoresInfo{ + Stores: []pd.StoreInfo{ { - Store: &pdapi.MetaStore{ - Store: &metapb.Store{ - Id: 333, - Address: fmt.Sprintf("%s-tiflash-1.%s-tiflash-peer.%s.svc:20160", "test", "test", "default"), - }, + Store: pd.MetaStore{ + ID: 333, + 
Address: fmt.Sprintf("%s-tiflash-1.%s-tiflash-peer.%s.svc:20160", "test", "test", "default"), StateName: "Up", }, - Status: &pdapi.StoreStatus{ + Status: pd.StoreStatus{ LeaderCount: 1, LastHeartbeatTS: time.Now(), }, @@ -479,35 +459,33 @@ func TestTiFlashMemberManagerSetStoreLabelsForTiFlash(t *testing.T) { { name: "already has labels", errWhenGetStores: false, - storeInfo: &pdapi.StoresInfo{ - Stores: []*pdapi.StoreInfo{ + storeInfo: &pd.StoresInfo{ + Stores: []pd.StoreInfo{ { - Store: &pdapi.MetaStore{ - Store: &metapb.Store{ - Id: 333, - Address: fmt.Sprintf("%s-tiflash-1.%s-tiflash-peer.%s.svc:20160", "test", "test", "default"), - Labels: []*metapb.StoreLabel{ - { - Key: "region", - Value: "region", - }, - { - Key: "zone", - Value: "zone", - }, - { - Key: "rack", - Value: "rack", - }, - { - Key: "host", - Value: "host", - }, + Store: pd.MetaStore{ + ID: 333, + Address: fmt.Sprintf("%s-tiflash-1.%s-tiflash-peer.%s.svc:20160", "test", "test", "default"), + Labels: []pd.StoreLabel{ + { + Key: "region", + Value: "region", + }, + { + Key: "zone", + Value: "zone", + }, + { + Key: "rack", + Value: "rack", + }, + { + Key: "host", + Value: "host", }, }, StateName: "Up", }, - Status: &pdapi.StoreStatus{ + Status: pd.StoreStatus{ LeaderCount: 1, LastHeartbeatTS: time.Now(), }, @@ -525,23 +503,21 @@ func TestTiFlashMemberManagerSetStoreLabelsForTiFlash(t *testing.T) { { name: "labels not equal, but set failed", errWhenGetStores: false, - storeInfo: &pdapi.StoresInfo{ - Stores: []*pdapi.StoreInfo{ + storeInfo: &pd.StoresInfo{ + Stores: []pd.StoreInfo{ { - Store: &pdapi.MetaStore{ - Store: &metapb.Store{ - Id: 333, - Address: fmt.Sprintf("%s-tiflash-1.%s-tiflash-peer.%s.svc:20160", "test", "test", "default"), - Labels: []*metapb.StoreLabel{ - { - Key: "region", - Value: "region", - }, + Store: pd.MetaStore{ + ID: 333, + Address: fmt.Sprintf("%s-tiflash-1.%s-tiflash-peer.%s.svc:20160", "test", "test", "default"), + Labels: []pd.StoreLabel{ + { + Key: "region", + Value: "region", }, }, StateName: "Up", }, - Status: &pdapi.StoreStatus{ + Status: pd.StoreStatus{ LeaderCount: 1, LastHeartbeatTS: time.Now(), }, @@ -559,23 +535,21 @@ func TestTiFlashMemberManagerSetStoreLabelsForTiFlash(t *testing.T) { { name: "labels not equal, set success", errWhenGetStores: false, - storeInfo: &pdapi.StoresInfo{ - Stores: []*pdapi.StoreInfo{ + storeInfo: &pd.StoresInfo{ + Stores: []pd.StoreInfo{ { - Store: &pdapi.MetaStore{ - Store: &metapb.Store{ - Id: 333, - Address: fmt.Sprintf("%s-tiflash-1.%s-tiflash-peer.%s.svc:20160", "test", "test", "default"), - Labels: []*metapb.StoreLabel{ - { - Key: "region", - Value: "region", - }, + Store: pd.MetaStore{ + ID: 333, + Address: fmt.Sprintf("%s-tiflash-1.%s-tiflash-peer.%s.svc:20160", "test", "test", "default"), + Labels: []pd.StoreLabel{ + { + Key: "region", + Value: "region", }, }, StateName: "Up", }, - Status: &pdapi.StoreStatus{ + Status: pd.StoreStatus{ LeaderCount: 1, LastHeartbeatTS: time.Now(), }, @@ -605,9 +579,9 @@ func TestTiFlashMemberManagerSyncTidbClusterStatus(t *testing.T) { updateTC func(*v1alpha1.TidbCluster) upgradingFn func(corelisters.PodLister, pdapi.PDControlInterface, *apps.StatefulSet, *v1alpha1.TidbCluster) (bool, error) errWhenGetStores bool - storeInfo *pdapi.StoresInfo + storeInfo *pd.StoresInfo errWhenGetTombstoneStores bool - tombstoneStoreInfo *pdapi.StoresInfo + tombstoneStoreInfo *pd.StoresInfo errExpectFn func(*GomegaWithT, error) tcExpectFn func(*GomegaWithT, *v1alpha1.TidbCluster) } @@ -754,12 +728,12 @@ func 
TestTiFlashMemberManagerSyncTidbClusterStatus(t *testing.T) { return false, nil }, errWhenGetStores: false, - storeInfo: &pdapi.StoresInfo{ - Stores: []*pdapi.StoreInfo{}, + storeInfo: &pd.StoresInfo{ + Stores: []pd.StoreInfo{}, }, errWhenGetTombstoneStores: false, - tombstoneStoreInfo: &pdapi.StoresInfo{ - Stores: []*pdapi.StoreInfo{}, + tombstoneStoreInfo: &pd.StoresInfo{ + Stores: []pd.StoreInfo{}, }, errExpectFn: errExpectNil, tcExpectFn: func(g *GomegaWithT, tc *v1alpha1.TidbCluster) { @@ -774,19 +748,9 @@ func TestTiFlashMemberManagerSyncTidbClusterStatus(t *testing.T) { upgradingFn: func(lister corelisters.PodLister, controlInterface pdapi.PDControlInterface, set *apps.StatefulSet, cluster *v1alpha1.TidbCluster) (bool, error) { return false, nil }, - errWhenGetStores: false, - storeInfo: &pdapi.StoresInfo{ - Stores: []*pdapi.StoreInfo{ - { - Store: nil, - }, - }, - }, + errWhenGetStores: false, errWhenGetTombstoneStores: false, - tombstoneStoreInfo: &pdapi.StoresInfo{ - Stores: []*pdapi.StoreInfo{}, - }, - errExpectFn: errExpectNil, + errExpectFn: errExpectNil, tcExpectFn: func(g *GomegaWithT, tc *v1alpha1.TidbCluster) { g.Expect(len(tc.Status.TiFlash.Stores)).To(Equal(0)) g.Expect(len(tc.Status.TiFlash.TombstoneStores)).To(Equal(0)) @@ -799,19 +763,9 @@ func TestTiFlashMemberManagerSyncTidbClusterStatus(t *testing.T) { upgradingFn: func(lister corelisters.PodLister, controlInterface pdapi.PDControlInterface, set *apps.StatefulSet, cluster *v1alpha1.TidbCluster) (bool, error) { return false, nil }, - errWhenGetStores: false, - storeInfo: &pdapi.StoresInfo{ - Stores: []*pdapi.StoreInfo{ - { - Status: nil, - }, - }, - }, + errWhenGetStores: false, errWhenGetTombstoneStores: false, - tombstoneStoreInfo: &pdapi.StoresInfo{ - Stores: []*pdapi.StoreInfo{}, - }, - errExpectFn: errExpectNil, + errExpectFn: errExpectNil, tcExpectFn: func(g *GomegaWithT, tc *v1alpha1.TidbCluster) { g.Expect(len(tc.Status.TiFlash.Stores)).To(Equal(0)) g.Expect(len(tc.Status.TiFlash.TombstoneStores)).To(Equal(0)) @@ -829,38 +783,31 @@ func TestTiFlashMemberManagerSyncTidbClusterStatus(t *testing.T) { return false, nil }, errWhenGetStores: false, - storeInfo: &pdapi.StoresInfo{ - Stores: []*pdapi.StoreInfo{ + storeInfo: &pd.StoresInfo{ + Stores: []pd.StoreInfo{ { - Store: &pdapi.MetaStore{ - Store: &metapb.Store{ - Id: 333, - Address: fmt.Sprintf("%s-tiflash-1.%s-tiflash-peer.%s.svc:20160", "test", "test", "default"), - }, + Store: pd.MetaStore{ + ID: 333, + Address: fmt.Sprintf("%s-tiflash-1.%s-tiflash-peer.%s.svc:20160", "test", "test", "default"), }, - Status: &pdapi.StoreStatus{ + Status: pd.StoreStatus{ LastHeartbeatTS: time.Time{}, }, }, { - Store: &pdapi.MetaStore{ - Store: &metapb.Store{ - Id: 330, - Address: fmt.Sprintf("%s-tikv-1.%s-tikv-peer.%s.svc:20160", "test", "test", "default"), - }, + Store: pd.MetaStore{ + ID: 330, + Address: fmt.Sprintf("%s-tikv-1.%s-tikv-peer.%s.svc:20160", "test", "test", "default"), StateName: "Up", }, - Status: &pdapi.StoreStatus{ + Status: pd.StoreStatus{ LastHeartbeatTS: time.Now(), }, }, }, }, errWhenGetTombstoneStores: false, - tombstoneStoreInfo: &pdapi.StoresInfo{ - Stores: []*pdapi.StoreInfo{}, - }, - errExpectFn: errExpectNil, + errExpectFn: errExpectNil, tcExpectFn: func(g *GomegaWithT, tc *v1alpha1.TidbCluster) { g.Expect(time.Time{}.IsZero()).To(BeTrue()) g.Expect(len(tc.Status.TiFlash.Stores)).To(Equal(1)) @@ -878,38 +825,31 @@ func TestTiFlashMemberManagerSyncTidbClusterStatus(t *testing.T) { return false, nil }, errWhenGetStores: false, - storeInfo: 
&pdapi.StoresInfo{ - Stores: []*pdapi.StoreInfo{ + storeInfo: &pd.StoresInfo{ + Stores: []pd.StoreInfo{ { - Store: &pdapi.MetaStore{ - Store: &metapb.Store{ - Id: 333, - Address: fmt.Sprintf("%s-tiflash-1.%s-tiflash-peer.%s.svc:20160", "test", "test", "default"), - }, + Store: pd.MetaStore{ + ID: 333, + Address: fmt.Sprintf("%s-tiflash-1.%s-tiflash-peer.%s.svc:20160", "test", "test", "default"), }, - Status: &pdapi.StoreStatus{ + Status: pd.StoreStatus{ LastHeartbeatTS: time.Time{}, }, }, { - Store: &pdapi.MetaStore{ - Store: &metapb.Store{ - Id: 330, - Address: fmt.Sprintf("%s-tikv-1.%s-tikv-peer.%s.svc:20160", "test", "test", "default"), - }, + Store: pd.MetaStore{ + ID: 330, + Address: fmt.Sprintf("%s-tikv-1.%s-tikv-peer.%s.svc:20160", "test", "test", "default"), StateName: "Up", }, - Status: &pdapi.StoreStatus{ + Status: pd.StoreStatus{ LastHeartbeatTS: time.Now(), }, }, }, }, errWhenGetTombstoneStores: false, - tombstoneStoreInfo: &pdapi.StoresInfo{ - Stores: []*pdapi.StoreInfo{}, - }, - errExpectFn: errExpectNil, + errExpectFn: errExpectNil, tcExpectFn: func(g *GomegaWithT, tc *v1alpha1.TidbCluster) { g.Expect(time.Time{}.IsZero()).To(BeTrue()) g.Expect(len(tc.Status.TiFlash.Stores)).To(Equal(1)) @@ -927,38 +867,31 @@ func TestTiFlashMemberManagerSyncTidbClusterStatus(t *testing.T) { return false, nil }, errWhenGetStores: false, - storeInfo: &pdapi.StoresInfo{ - Stores: []*pdapi.StoreInfo{ + storeInfo: &pd.StoresInfo{ + Stores: []pd.StoreInfo{ { - Store: &pdapi.MetaStore{ - Store: &metapb.Store{ - Id: 333, - Address: fmt.Sprintf("%s-tiflash-1.%s-tiflash-peer.%s.svc:20160", "test", "test", "default"), - }, + Store: pd.MetaStore{ + ID: 333, + Address: fmt.Sprintf("%s-tiflash-1.%s-tiflash-peer.%s.svc:20160", "test", "test", "default"), }, - Status: &pdapi.StoreStatus{ + Status: pd.StoreStatus{ LastHeartbeatTS: time.Now(), }, }, { - Store: &pdapi.MetaStore{ - Store: &metapb.Store{ - Id: 330, - Address: fmt.Sprintf("%s-tikv-1.%s-tikv-peer.%s.svc:20160", "test", "test", "default"), - }, + Store: pd.MetaStore{ + ID: 330, + Address: fmt.Sprintf("%s-tikv-1.%s-tikv-peer.%s.svc:20160", "test", "test", "default"), StateName: "Up", }, - Status: &pdapi.StoreStatus{ + Status: pd.StoreStatus{ LastHeartbeatTS: time.Now(), }, }, }, }, errWhenGetTombstoneStores: false, - tombstoneStoreInfo: &pdapi.StoresInfo{ - Stores: []*pdapi.StoreInfo{}, - }, - errExpectFn: errExpectNil, + errExpectFn: errExpectNil, tcExpectFn: func(g *GomegaWithT, tc *v1alpha1.TidbCluster) { g.Expect(time.Time{}.IsZero()).To(BeTrue()) g.Expect(len(tc.Status.TiFlash.Stores)).To(Equal(1)) @@ -976,38 +909,31 @@ func TestTiFlashMemberManagerSyncTidbClusterStatus(t *testing.T) { return false, nil }, errWhenGetStores: false, - storeInfo: &pdapi.StoresInfo{ - Stores: []*pdapi.StoreInfo{ + storeInfo: &pd.StoresInfo{ + Stores: []pd.StoreInfo{ { - Store: &pdapi.MetaStore{ - Store: &metapb.Store{ - Id: 333, - Address: fmt.Sprintf("%s-tiflash-1.%s-tiflash-peer.%s.svc:20160", "test", "test", "default"), - }, + Store: pd.MetaStore{ + ID: 333, + Address: fmt.Sprintf("%s-tiflash-1.%s-tiflash-peer.%s.svc:20160", "test", "test", "default"), }, - Status: &pdapi.StoreStatus{ + Status: pd.StoreStatus{ LastHeartbeatTS: time.Now(), }, }, { - Store: &pdapi.MetaStore{ - Store: &metapb.Store{ - Id: 330, - Address: fmt.Sprintf("%s-tikv-1.%s-tikv-peer.%s.svc:20160", "test", "test", "default"), - }, + Store: pd.MetaStore{ + ID: 330, + Address: fmt.Sprintf("%s-tikv-1.%s-tikv-peer.%s.svc:20160", "test", "test", "default"), StateName: "Up", }, - Status: 
&pdapi.StoreStatus{ + Status: pd.StoreStatus{ LastHeartbeatTS: time.Now(), }, }, }, }, errWhenGetTombstoneStores: false, - tombstoneStoreInfo: &pdapi.StoresInfo{ - Stores: []*pdapi.StoreInfo{}, - }, - errExpectFn: errExpectNil, + errExpectFn: errExpectNil, tcExpectFn: func(g *GomegaWithT, tc *v1alpha1.TidbCluster) { g.Expect(len(tc.Status.TiFlash.Stores)).To(Equal(1)) g.Expect(tc.Status.TiFlash.Stores["333"].LastTransitionTime.Time.IsZero()).To(BeFalse()) @@ -1028,39 +954,32 @@ func TestTiFlashMemberManagerSyncTidbClusterStatus(t *testing.T) { return false, nil }, errWhenGetStores: false, - storeInfo: &pdapi.StoresInfo{ - Stores: []*pdapi.StoreInfo{ + storeInfo: &pd.StoresInfo{ + Stores: []pd.StoreInfo{ { - Store: &pdapi.MetaStore{ - Store: &metapb.Store{ - Id: 333, - Address: fmt.Sprintf("%s-tiflash-1.%s-tiflash-peer.%s.svc:20160", "test", "test", "default"), - }, + Store: pd.MetaStore{ + ID: 333, + Address: fmt.Sprintf("%s-tiflash-1.%s-tiflash-peer.%s.svc:20160", "test", "test", "default"), StateName: "Up", }, - Status: &pdapi.StoreStatus{ + Status: pd.StoreStatus{ LastHeartbeatTS: time.Now(), }, }, { - Store: &pdapi.MetaStore{ - Store: &metapb.Store{ - Id: 330, - Address: fmt.Sprintf("%s-tikv-1.%s-tikv-peer.%s.svc:20160", "test", "test", "default"), - }, + Store: pd.MetaStore{ + ID: 330, + Address: fmt.Sprintf("%s-tikv-1.%s-tikv-peer.%s.svc:20160", "test", "test", "default"), StateName: "Up", }, - Status: &pdapi.StoreStatus{ + Status: pd.StoreStatus{ LastHeartbeatTS: time.Now(), }, }, }, }, errWhenGetTombstoneStores: false, - tombstoneStoreInfo: &pdapi.StoresInfo{ - Stores: []*pdapi.StoreInfo{}, - }, - errExpectFn: errExpectNil, + errExpectFn: errExpectNil, tcExpectFn: func(g *GomegaWithT, tc *v1alpha1.TidbCluster) { g.Expect(len(tc.Status.TiFlash.Stores)).To(Equal(1)) g.Expect(tc.Status.TiFlash.Stores["333"].LastTransitionTime).To(Equal(now)) @@ -1081,39 +1000,32 @@ func TestTiFlashMemberManagerSyncTidbClusterStatus(t *testing.T) { return false, nil }, errWhenGetStores: false, - storeInfo: &pdapi.StoresInfo{ - Stores: []*pdapi.StoreInfo{ + storeInfo: &pd.StoresInfo{ + Stores: []pd.StoreInfo{ { - Store: &pdapi.MetaStore{ - Store: &metapb.Store{ - Id: 333, - Address: fmt.Sprintf("%s-tiflash-1.%s-tiflash-peer.%s.svc:20160", "test", "test", "default"), - }, + Store: pd.MetaStore{ + ID: 333, + Address: fmt.Sprintf("%s-tiflash-1.%s-tiflash-peer.%s.svc:20160", "test", "test", "default"), StateName: "Down", }, - Status: &pdapi.StoreStatus{ + Status: pd.StoreStatus{ LastHeartbeatTS: time.Now(), }, }, { - Store: &pdapi.MetaStore{ - Store: &metapb.Store{ - Id: 330, - Address: fmt.Sprintf("%s-tikv-1.%s-tikv-peer.%s.svc:20160", "test", "test", "default"), - }, + Store: pd.MetaStore{ + ID: 330, + Address: fmt.Sprintf("%s-tikv-1.%s-tikv-peer.%s.svc:20160", "test", "test", "default"), StateName: "Up", }, - Status: &pdapi.StoreStatus{ + Status: pd.StoreStatus{ LastHeartbeatTS: time.Now(), }, }, }, }, errWhenGetTombstoneStores: false, - tombstoneStoreInfo: &pdapi.StoresInfo{ - Stores: []*pdapi.StoreInfo{}, - }, - errExpectFn: errExpectNil, + errExpectFn: errExpectNil, tcExpectFn: func(g *GomegaWithT, tc *v1alpha1.TidbCluster) { g.Expect(len(tc.Status.TiFlash.Stores)).To(Equal(1)) g.Expect(tc.Status.TiFlash.Stores["333"].LastTransitionTime).NotTo(Equal(now)) @@ -1134,38 +1046,31 @@ func TestTiFlashMemberManagerSyncTidbClusterStatus(t *testing.T) { return false, nil }, errWhenGetStores: false, - storeInfo: &pdapi.StoresInfo{ - Stores: []*pdapi.StoreInfo{ + storeInfo: &pd.StoresInfo{ + Stores: 
[]pd.StoreInfo{ { - Store: &pdapi.MetaStore{ - Store: &metapb.Store{ - Id: 333, - Address: fmt.Sprintf("%s-tiflash-1.%s-tiflash-peer.%s.svc:20160", "test", "test", "default"), - }, + Store: pd.MetaStore{ + ID: 333, + Address: fmt.Sprintf("%s-tiflash-1.%s-tiflash-peer.%s.svc:20160", "test", "test", "default"), StateName: "Up", }, - Status: &pdapi.StoreStatus{ + Status: pd.StoreStatus{ LastHeartbeatTS: time.Now(), }, }, { - Store: &pdapi.MetaStore{ - Store: &metapb.Store{ - Id: 330, - Address: fmt.Sprintf("%s-tikv-1.%s-tikv-peer.%s.svc:20160", "test", "test", "default"), - }, + Store: pd.MetaStore{ + ID: 330, + Address: fmt.Sprintf("%s-tikv-1.%s-tikv-peer.%s.svc:20160", "test", "test", "default"), StateName: "Up", }, - Status: &pdapi.StoreStatus{ + Status: pd.StoreStatus{ LastHeartbeatTS: time.Now(), }, }, }, }, errWhenGetTombstoneStores: true, - tombstoneStoreInfo: &pdapi.StoresInfo{ - Stores: []*pdapi.StoreInfo{}, - }, errExpectFn: func(g *GomegaWithT, err error) { g.Expect(err).To(HaveOccurred()) g.Expect(strings.Contains(err.Error(), "failed to get tombstone stores")).To(BeTrue()) @@ -1183,58 +1088,50 @@ func TestTiFlashMemberManagerSyncTidbClusterStatus(t *testing.T) { return false, nil }, errWhenGetStores: false, - storeInfo: &pdapi.StoresInfo{ - Stores: []*pdapi.StoreInfo{ + storeInfo: &pd.StoresInfo{ + Stores: []pd.StoreInfo{ { - Store: &pdapi.MetaStore{ - Store: &metapb.Store{ - Id: 333, - Address: fmt.Sprintf("%s-tiflash-1.%s-tiflash-peer.%s.svc:20160", "test", "test", "default"), - }, + Store: pd.MetaStore{ + ID: 333, + Address: fmt.Sprintf("%s-tiflash-1.%s-tiflash-peer.%s.svc:20160", "test", "test", "default"), StateName: "Up", }, - Status: &pdapi.StoreStatus{ + Status: pd.StoreStatus{ LastHeartbeatTS: time.Now(), }, }, { - Store: &pdapi.MetaStore{ - Store: &metapb.Store{ - Id: 330, - Address: fmt.Sprintf("%s-tikv-1.%s-tikv-peer.%s.svc:20160", "test", "test", "default"), - }, + Store: pd.MetaStore{ + ID: 330, + Address: fmt.Sprintf("%s-tikv-1.%s-tikv-peer.%s.svc:20160", "test", "test", "default"), StateName: "Up", }, - Status: &pdapi.StoreStatus{ + Status: pd.StoreStatus{ LastHeartbeatTS: time.Now(), }, }, }, }, errWhenGetTombstoneStores: false, - tombstoneStoreInfo: &pdapi.StoresInfo{ - Stores: []*pdapi.StoreInfo{ + tombstoneStoreInfo: &pd.StoresInfo{ + Stores: []pd.StoreInfo{ { - Store: &pdapi.MetaStore{ - Store: &metapb.Store{ - Id: 333, - Address: fmt.Sprintf("%s-tiflash-1.%s-tiflash-peer.%s.svc:20160", "test", "test", "default"), - }, + Store: pd.MetaStore{ + ID: 333, + Address: fmt.Sprintf("%s-tiflash-1.%s-tiflash-peer.%s.svc:20160", "test", "test", "default"), StateName: "Tombstone", }, - Status: &pdapi.StoreStatus{ + Status: pd.StoreStatus{ LastHeartbeatTS: time.Now(), }, }, { - Store: &pdapi.MetaStore{ - Store: &metapb.Store{ - Id: 330, - Address: fmt.Sprintf("%s-tikv-1.%s-tikv-peer.%s.svc:20160", "test", "test", "default"), - }, + Store: pd.MetaStore{ + ID: 330, + Address: fmt.Sprintf("%s-tikv-1.%s-tikv-peer.%s.svc:20160", "test", "test", "default"), StateName: "Tombstone", }, - Status: &pdapi.StoreStatus{ + Status: pd.StoreStatus{ LastHeartbeatTS: time.Now(), }, }, @@ -1256,63 +1153,54 @@ func TestTiFlashMemberManagerSyncTidbClusterStatus(t *testing.T) { return false, nil }, errWhenGetStores: false, - storeInfo: &pdapi.StoresInfo{ - Stores: []*pdapi.StoreInfo{ + storeInfo: &pd.StoresInfo{ + Stores: []pd.StoreInfo{ { - Store: &pdapi.MetaStore{ - Store: &metapb.Store{ - Id: 333, - Address: fmt.Sprintf("%s-tiflash-1.%s-tiflash-peer.%s.svc.cluster1.com:20160", "test", 
"test", "default"), - Labels: []*metapb.StoreLabel{ - { - Key: "engine", - Value: "tiflash", - }, + Store: pd.MetaStore{ + ID: 333, + Address: fmt.Sprintf("%s-tiflash-1.%s-tiflash-peer.%s.svc.cluster1.com:20160", "test", "test", "default"), + Labels: []pd.StoreLabel{ + { + Key: "engine", + Value: "tiflash", }, }, StateName: "Up", }, - Status: &pdapi.StoreStatus{ + Status: pd.StoreStatus{ LastHeartbeatTS: time.Now(), }, }, { - Store: &pdapi.MetaStore{ - Store: &metapb.Store{ - Id: 334, - Address: fmt.Sprintf("%s-tiflash-1.%s-tiflash-peer.%s.svc.cluster2.com:20160", "test", "test", "default"), - Labels: []*metapb.StoreLabel{ - { - Key: "engine", - Value: "tiflash", - }, + Store: pd.MetaStore{ + ID: 334, + Address: fmt.Sprintf("%s-tiflash-1.%s-tiflash-peer.%s.svc.cluster2.com:20160", "test", "test", "default"), + Labels: []pd.StoreLabel{ + { + Key: "engine", + Value: "tiflash", }, }, StateName: "Up", }, - Status: &pdapi.StoreStatus{ + Status: pd.StoreStatus{ LastHeartbeatTS: time.Now(), }, }, { - Store: &pdapi.MetaStore{ - Store: &metapb.Store{ - Id: 330, - Address: fmt.Sprintf("%s-tikv-1.%s-tikv-peer.%s.svc.cluster1.com:20160", "test", "test", "default"), - }, + Store: pd.MetaStore{ + ID: 330, + Address: fmt.Sprintf("%s-tikv-1.%s-tikv-peer.%s.svc.cluster1.com:20160", "test", "test", "default"), StateName: "Up", }, - Status: &pdapi.StoreStatus{ + Status: pd.StoreStatus{ LastHeartbeatTS: time.Now(), }, }, }, }, errWhenGetTombstoneStores: false, - tombstoneStoreInfo: &pdapi.StoresInfo{ - Stores: []*pdapi.StoreInfo{}, - }, - errExpectFn: errExpectNil, + errExpectFn: errExpectNil, tcExpectFn: func(g *GomegaWithT, tc *v1alpha1.TidbCluster) { g.Expect(len(tc.Status.TiFlash.Stores)).To(Equal(1)) g.Expect(len(tc.Status.TiFlash.PeerStores)).To(Equal(1)) diff --git a/pkg/manager/member/tiflash_scaler.go b/pkg/manager/member/tiflash_scaler.go index 470f0896cef..56387836d33 100644 --- a/pkg/manager/member/tiflash_scaler.go +++ b/pkg/manager/member/tiflash_scaler.go @@ -14,6 +14,7 @@ package member import ( + "context" "fmt" "strconv" "time" @@ -145,7 +146,7 @@ func (s *tiflashScaler) scaleInOne(tc *v1alpha1.TidbCluster, ordinal int32) erro return err } if state != v1alpha1.TiKVStateOffline { - if err := controller.GetPDClient(s.deps.PDControl, tc).DeleteStore(id); err != nil { + if err := controller.GetPDClient(s.deps.PDControl, tc).DeleteStore(context.TODO(), id); err != nil { klog.Errorf("tiflash scale in: failed to delete store %d, %v", id, err) return err } diff --git a/pkg/manager/member/tiflash_scaler_test.go b/pkg/manager/member/tiflash_scaler_test.go index 412e701a731..238667bd783 100644 --- a/pkg/manager/member/tiflash_scaler_test.go +++ b/pkg/manager/member/tiflash_scaler_test.go @@ -21,12 +21,13 @@ import ( "github.com/google/go-cmp/cmp" . 
"github.com/onsi/gomega" "github.com/pingcap/advanced-statefulset/client/apis/apps/v1/helper" - "github.com/pingcap/kvproto/pkg/metapb" "github.com/pingcap/tidb-operator/pkg/apis/label" "github.com/pingcap/tidb-operator/pkg/apis/pingcap/v1alpha1" "github.com/pingcap/tidb-operator/pkg/controller" "github.com/pingcap/tidb-operator/pkg/features" "github.com/pingcap/tidb-operator/pkg/pdapi" + pd "github.com/tikv/pd/client/http" + apps "k8s.io/api/apps/v1" corev1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/api/errors" @@ -601,26 +602,24 @@ func TestTiFlashScalerScaleIn(t *testing.T) { pdClient.AddReaction(pdapi.GetConfigActionType, func(action *pdapi.Action) (interface{}, error) { var replicas uint64 = 3 - return &pdapi.PDConfigFromAPI{ - Replication: &pdapi.PDReplicationConfig{ - MaxReplicas: &replicas, + return &pd.ServerConfig{ + Replication: pd.ReplicationConfig{ + MaxReplicas: replicas, }, }, nil }) if test.getStoresFn == nil { test.getStoresFn = func(action *pdapi.Action) (interface{}, error) { - store := &pdapi.StoreInfo{ - Store: &pdapi.MetaStore{ + store := pd.StoreInfo{ + Store: pd.MetaStore{ StateName: v1alpha1.TiKVStateUp, - Store: &metapb.Store{ - Address: fmt.Sprintf("%s-tiflash-0", "basic"), - }, + Address: fmt.Sprintf("%s-tiflash-0", "basic"), }, } - return &pdapi.StoresInfo{ + return &pd.StoresInfo{ Count: 5, - Stores: []*pdapi.StoreInfo{store, store, store, store, store}, + Stores: []pd.StoreInfo{store, store, store, store, store}, }, nil } } @@ -953,26 +952,24 @@ func TestTiFlashScalerScaleInSimultaneously(t *testing.T) { pdClient.AddReaction(pdapi.GetConfigActionType, func(action *pdapi.Action) (interface{}, error) { var replicas uint64 = 3 - return &pdapi.PDConfigFromAPI{ - Replication: &pdapi.PDReplicationConfig{ - MaxReplicas: &replicas, + return &pd.ServerConfig{ + Replication: pd.ReplicationConfig{ + MaxReplicas: replicas, }, }, nil }) if test.getStoresFn == nil { test.getStoresFn = func(action *pdapi.Action) (interface{}, error) { - store := &pdapi.StoreInfo{ - Store: &pdapi.MetaStore{ + store := pd.StoreInfo{ + Store: pd.MetaStore{ StateName: v1alpha1.TiKVStateUp, - Store: &metapb.Store{ - Address: fmt.Sprintf("%s-tiflash-0", "basic"), - }, + Address: fmt.Sprintf("%s-tiflash-0", "basic"), }, } - return &pdapi.StoresInfo{ + return &pd.StoresInfo{ Count: 5, - Stores: []*pdapi.StoreInfo{store, store, store, store, store}, + Stores: []pd.StoreInfo{store, store, store, store, store}, }, nil } } @@ -1250,17 +1247,15 @@ func TestTiFlashScalerScaleInSimultaneously(t *testing.T) { storeIdLabel: "13", }}, getStoresFn: func(action *pdapi.Action) (interface{}, error) { - store := &pdapi.StoreInfo{ - Store: &pdapi.MetaStore{ + store := pd.StoreInfo{ + Store: pd.MetaStore{ StateName: v1alpha1.TiKVStateUp, - Store: &metapb.Store{ - Address: fmt.Sprintf("%s-tiflash-0", "basic"), - }, + Address: fmt.Sprintf("%s-tiflash-0", "basic"), }, } - return &pdapi.StoresInfo{ + return &pd.StoresInfo{ Count: 6, - Stores: []*pdapi.StoreInfo{store, store, store, store, store, store}, + Stores: []pd.StoreInfo{store, store, store, store, store, store}, }, nil }, scaleInParallelism: 2, @@ -1458,25 +1453,23 @@ func TestTiFlashScalerScaleInSimultaneouslyExtra(t *testing.T) { pdClient := controller.NewFakePDClient(pdControl, tc) pdClient.AddReaction(pdapi.GetConfigActionType, func(action *pdapi.Action) (interface{}, error) { var replicas uint64 = 3 - return &pdapi.PDConfigFromAPI{ - Replication: &pdapi.PDReplicationConfig{ - MaxReplicas: &replicas, + return &pd.ServerConfig{ + Replication: 
pd.ReplicationConfig{ + MaxReplicas: replicas, }, }, nil }) if test.getStoresFn == nil { test.getStoresFn = func(action *pdapi.Action) (interface{}, error) { - store := &pdapi.StoreInfo{ - Store: &pdapi.MetaStore{ + store := pd.StoreInfo{ + Store: pd.MetaStore{ StateName: v1alpha1.TiKVStateUp, - Store: &metapb.Store{ - Address: fmt.Sprintf("%s-tikv-0", "basic"), - }, + Address: fmt.Sprintf("%s-tikv-0", "basic"), }, } - return &pdapi.StoresInfo{ + return &pd.StoresInfo{ Count: 5, - Stores: []*pdapi.StoreInfo{store, store, store, store, store}, + Stores: []pd.StoreInfo{store, store, store, store, store}, }, nil } } diff --git a/pkg/manager/member/tikv_member_manager.go b/pkg/manager/member/tikv_member_manager.go index 9f656920cfc..3b926153a12 100644 --- a/pkg/manager/member/tikv_member_manager.go +++ b/pkg/manager/member/tikv_member_manager.go @@ -14,6 +14,7 @@ package member import ( + "context" "fmt" "path" "reflect" @@ -31,6 +32,7 @@ import ( "github.com/pingcap/tidb-operator/pkg/manager/volumes" "github.com/pingcap/tidb-operator/pkg/pdapi" "github.com/pingcap/tidb-operator/pkg/util" + pd "github.com/tikv/pd/client/http" "github.com/pingcap/advanced-statefulset/client/apis/apps/v1/helper" "github.com/pingcap/kvproto/pkg/metapb" @@ -839,7 +841,7 @@ func (m *tikvMemberManager) syncTiKVClusterStatus(tc *v1alpha1.TidbCluster, set pdCli := controller.GetPDClient(m.deps.PDControl, tc) // This only returns Up/Down/Offline stores - storesInfo, err := pdCli.GetStores() + storesInfo, err := pdCli.GetStores(context.TODO()) if err != nil { if pdapi.IsTiKVNotBootstrappedError(err) { klog.Infof("TiKV of Cluster %s/%s not bootstrapped yet", tc.Namespace, tc.Name) @@ -877,23 +879,21 @@ func (m *tikvMemberManager) syncTiKVClusterStatus(tc *v1alpha1.TidbCluster, set // In theory, the external tikv can join the cluster, and the operator would only manage the internal tikv. // So we check the store owner to make sure it. 
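The hunk below can drop the store.Store != nil guard: with the pd HTTP client types used throughout this patch, StoresInfo.Stores is a []pd.StoreInfo and each element embeds a pd.MetaStore value (ID, Address, Labels, StateName) rather than wrapping a *metapb.Store, so there is no pointer left to check. A minimal illustrative sketch, not part of the patch; the package and helper name are hypothetical and only the field names already used in the hunks above are assumed:

    package example

    import (
    	pd "github.com/tikv/pd/client/http"
    )

    // upStoreAddresses walks the value-typed Stores slice directly;
    // no nil check on s.Store is needed or possible.
    func upStoreAddresses(stores *pd.StoresInfo) []string {
    	var addrs []string
    	for _, s := range stores.Stores {
    		if s.Store.StateName == "Up" {
    			addrs = append(addrs, s.Store.Address)
    		}
    	}
    	return addrs
    }
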
- if store.Store != nil { - if pattern.Match([]byte(store.Store.Address)) { - stores[status.ID] = *status - } else if util.MatchLabelFromStoreLabels(store.Store.Labels, label.TiKVLabelVal) { - peerStores[status.ID] = *status - } + if pattern.Match([]byte(store.Store.Address)) { + stores[status.ID] = *status + } else if util.MatchLabelFromStoreLabels(store.Store.Labels, label.TiKVLabelVal) { + peerStores[status.ID] = *status } } // this returns all tombstone stores - tombstoneStoresInfo, err := pdCli.GetTombStoneStores() + tombstoneStoresInfo, err := pdCli.GetStoresByState(context.TODO(), metapb.StoreState_Tombstone) if err != nil { tc.Status.TiKV.Synced = false return err } for _, store := range tombstoneStoresInfo.Stores { - if store.Store != nil && !pattern.Match([]byte(store.Store.Address)) { + if !pattern.Match([]byte(store.Store.Address)) { continue } status := getTiKVStore(store) @@ -928,12 +928,9 @@ func (m *tikvMemberManager) syncTiKVClusterStatus(tc *v1alpha1.TidbCluster, set return nil } -func getTiKVStore(store *pdapi.StoreInfo) *v1alpha1.TiKVStore { - if store.Store == nil || store.Status == nil { - return nil - } - storeID := fmt.Sprintf("%d", store.Store.GetId()) - ip := strings.Split(store.Store.GetAddress(), ":")[0] +func getTiKVStore(store pd.StoreInfo) *v1alpha1.TiKVStore { + storeID := fmt.Sprintf("%d", store.Store.ID) + ip := strings.Split(store.Store.Address, ":")[0] podName := strings.Split(ip, ".")[0] return &v1alpha1.TiKVStore{ @@ -961,12 +958,12 @@ func (m *tikvMemberManager) setStoreLabelsForTiKV(tc *v1alpha1.TidbCluster) (int } pdCli := controller.GetPDClient(m.deps.PDControl, tc) - storesInfo, err := pdCli.GetStores() + storesInfo, err := pdCli.GetStores(context.TODO()) if err != nil { return setCount, err } - config, err := pdCli.GetConfig() + config, err := pdCli.GetConfig(context.TODO()) if err != nil { return setCount, err } @@ -983,7 +980,7 @@ func (m *tikvMemberManager) setStoreLabelsForTiKV(tc *v1alpha1.TidbCluster) (int for _, store := range storesInfo.Stores { // In theory, the external tikv can join the cluster, and the operator would only manage the internal tikv. // So we check the store owner to make sure it. 
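The PD calls in these member-manager hunks now take a context.Context (GetStores, GetConfig, GetStoresByState, SetStoreLabels), and the patch passes context.TODO() at every call site. A caller that wants bounded PD round-trips could thread a deadline instead; the sketch below is illustrative only, the small local interface, the listStores helper, and the 10-second budget are assumptions, and the method signatures follow the ones used in this patch:

    package example

    import (
    	"context"
    	"time"

    	"github.com/pingcap/kvproto/pkg/metapb"
    	pd "github.com/tikv/pd/client/http"
    )

    // pdStoreAPI captures only the two calls used below; the real client
    // interface is larger.
    type pdStoreAPI interface {
    	GetStores(ctx context.Context) (*pd.StoresInfo, error)
    	GetStoresByState(ctx context.Context, state metapb.StoreState) (*pd.StoresInfo, error)
    }

    // listStores bounds each PD round-trip with a deadline instead of
    // passing context.TODO().
    func listStores(cli pdStoreAPI) (up, tombstone *pd.StoresInfo, err error) {
    	ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
    	defer cancel()

    	if up, err = cli.GetStores(ctx); err != nil { // Up/Down/Offline stores
    		return nil, nil, err
    	}
    	if tombstone, err = cli.GetStoresByState(ctx, metapb.StoreState_Tombstone); err != nil {
    		return nil, nil, err
    	}
    	return up, tombstone, nil
    }
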
- if store.Store != nil && !pattern.Match([]byte(store.Store.Address)) { + if !pattern.Match([]byte(store.Store.Address)) { continue } status := getTiKVStore(store) @@ -1005,10 +1002,10 @@ func (m *tikvMemberManager) setStoreLabelsForTiKV(tc *v1alpha1.TidbCluster) (int } if !m.storeLabelsEqualNodeLabels(store.Store.Labels, ls) { - set, err := pdCli.SetStoreLabels(store.Store.Id, ls) + set, err := pdCli.SetStoreLabels(context.TODO(), uint64(store.Store.ID), ls) if err != nil { msg := fmt.Sprintf("failed to set labels %v for store (id: %d, pod: %s/%s): %v ", - ls, store.Store.Id, ns, podName, err) + ls, store.Store.ID, ns, podName, err) m.deps.Recorder.Event(tc, corev1.EventTypeWarning, FailedSetStoreLabels, msg) continue } @@ -1024,12 +1021,12 @@ func (m *tikvMemberManager) setStoreLabelsForTiKV(tc *v1alpha1.TidbCluster) (int // storeLabelsEqualNodeLabels compares store labels with node labels // for historic reasons, PD stores TiKV labels as []*StoreLabel which is a key-value pair slice -func (m *tikvMemberManager) storeLabelsEqualNodeLabels(storeLabels []*metapb.StoreLabel, nodeLabels map[string]string) bool { +func (m *tikvMemberManager) storeLabelsEqualNodeLabels(storeLabels []pd.StoreLabel, nodeLabels map[string]string) bool { ls := map[string]string{} for _, label := range storeLabels { - key := label.GetKey() + key := label.Key if _, ok := nodeLabels[key]; ok { - val := label.GetValue() + val := label.Value ls[key] = val } } diff --git a/pkg/manager/member/tikv_member_manager_test.go b/pkg/manager/member/tikv_member_manager_test.go index b99e3de9b15..2f0e3eb2422 100644 --- a/pkg/manager/member/tikv_member_manager_test.go +++ b/pkg/manager/member/tikv_member_manager_test.go @@ -21,7 +21,6 @@ import ( "github.com/google/go-cmp/cmp" . "github.com/onsi/gomega" - "github.com/pingcap/kvproto/pkg/metapb" "github.com/pingcap/tidb-operator/pkg/apis/label" "github.com/pingcap/tidb-operator/pkg/apis/pingcap/v1alpha1" "github.com/pingcap/tidb-operator/pkg/apis/util/toml" @@ -29,6 +28,8 @@ import ( "github.com/pingcap/tidb-operator/pkg/manager/suspender" "github.com/pingcap/tidb-operator/pkg/manager/volumes" "github.com/pingcap/tidb-operator/pkg/pdapi" + pd "github.com/tikv/pd/client/http" + apps "k8s.io/api/apps/v1" corev1 "k8s.io/api/core/v1" v1 "k8s.io/api/core/v1" @@ -54,8 +55,8 @@ func TestTiKVMemberManagerSyncCreate(t *testing.T) { tls bool tikvPeerSvcCreated bool setCreated bool - pdStores *pdapi.StoresInfo - tombstoneStores *pdapi.StoresInfo + pdStores *pd.StoresInfo + tombstoneStores *pd.StoresInfo } testFn := func(test *testcase, t *testing.T) { @@ -150,8 +151,6 @@ func TestTiKVMemberManagerSyncCreate(t *testing.T) { err: false, tikvPeerSvcCreated: true, setCreated: true, - pdStores: &pdapi.StoresInfo{Count: 0, Stores: []*pdapi.StoreInfo{}}, - tombstoneStores: &pdapi.StoresInfo{Count: 0, Stores: []*pdapi.StoreInfo{}}, }, { name: "normal with tls", @@ -162,8 +161,6 @@ func TestTiKVMemberManagerSyncCreate(t *testing.T) { tls: true, tikvPeerSvcCreated: true, setCreated: true, - pdStores: &pdapi.StoresInfo{Count: 0, Stores: []*pdapi.StoreInfo{}}, - tombstoneStores: &pdapi.StoresInfo{Count: 0, Stores: []*pdapi.StoreInfo{}}, }, { name: "pd is not available", @@ -175,8 +172,6 @@ func TestTiKVMemberManagerSyncCreate(t *testing.T) { err: true, tikvPeerSvcCreated: false, setCreated: false, - pdStores: &pdapi.StoresInfo{Count: 0, Stores: []*pdapi.StoreInfo{}}, - tombstoneStores: &pdapi.StoresInfo{Count: 0, Stores: []*pdapi.StoreInfo{}}, }, { name: "error when create statefulset", @@ -186,8 +181,6 
@@ func TestTiKVMemberManagerSyncCreate(t *testing.T) { err: true, tikvPeerSvcCreated: true, setCreated: false, - pdStores: &pdapi.StoresInfo{Count: 0, Stores: []*pdapi.StoreInfo{}}, - tombstoneStores: &pdapi.StoresInfo{Count: 0, Stores: []*pdapi.StoreInfo{}}, }, { name: "error when create tikv peer service", @@ -197,8 +190,6 @@ func TestTiKVMemberManagerSyncCreate(t *testing.T) { err: true, tikvPeerSvcCreated: false, setCreated: false, - pdStores: &pdapi.StoresInfo{Count: 0, Stores: []*pdapi.StoreInfo{}}, - tombstoneStores: &pdapi.StoresInfo{Count: 0, Stores: []*pdapi.StoreInfo{}}, }, { name: "skip create when suspend", @@ -209,8 +200,6 @@ func TestTiKVMemberManagerSyncCreate(t *testing.T) { err: false, tikvPeerSvcCreated: false, setCreated: false, - pdStores: &pdapi.StoresInfo{Count: 0, Stores: []*pdapi.StoreInfo{}}, - tombstoneStores: &pdapi.StoresInfo{Count: 0, Stores: []*pdapi.StoreInfo{}}, }, } @@ -224,8 +213,8 @@ func TestTiKVMemberManagerSyncUpdate(t *testing.T) { type testcase struct { name string modify func(cluster *v1alpha1.TidbCluster) - pdStores *pdapi.StoresInfo - tombstoneStores *pdapi.StoresInfo + pdStores *pd.StoresInfo + tombstoneStores *pd.StoresInfo errWhenUpdateStatefulSet bool errWhenUpdateTiKVPeerService bool errWhenGetStores bool @@ -252,8 +241,8 @@ func TestTiKVMemberManagerSyncUpdate(t *testing.T) { tkmm, fakeSetControl, fakeSvcControl, pdClient, _, _ := newFakeTiKVMemberManager(tc) pdClient.AddReaction(pdapi.GetConfigActionType, func(action *pdapi.Action) (interface{}, error) { - return &pdapi.PDConfigFromAPI{ - Replication: &pdapi.PDReplicationConfig{ + return &pd.ServerConfig{ + Replication: pd.ReplicationConfig{ LocationLabels: []string{"region", "zone", "rack", "host"}, }, }, nil @@ -335,8 +324,6 @@ func TestTiKVMemberManagerSyncUpdate(t *testing.T) { tc.Status.PD.Phase = v1alpha1.NormalPhase }, // TODO add unit test for status sync - pdStores: &pdapi.StoresInfo{Count: 0, Stores: []*pdapi.StoreInfo{}}, - tombstoneStores: &pdapi.StoresInfo{Count: 0, Stores: []*pdapi.StoreInfo{}}, errWhenUpdateStatefulSet: false, errWhenUpdateTiKVPeerService: false, errWhenGetStores: false, @@ -358,8 +345,6 @@ func TestTiKVMemberManagerSyncUpdate(t *testing.T) { tc.Spec.TiKV.Replicas = 5 tc.Status.PD.Phase = v1alpha1.NormalPhase }, - pdStores: &pdapi.StoresInfo{Count: 0, Stores: []*pdapi.StoreInfo{}}, - tombstoneStores: &pdapi.StoresInfo{Count: 0, Stores: []*pdapi.StoreInfo{}}, errWhenUpdateStatefulSet: true, errWhenUpdateTiKVPeerService: false, err: true, @@ -374,8 +359,6 @@ func TestTiKVMemberManagerSyncUpdate(t *testing.T) { tc.Spec.TiKV.Replicas = 5 tc.Status.PD.Phase = v1alpha1.NormalPhase }, - pdStores: &pdapi.StoresInfo{Count: 0, Stores: []*pdapi.StoreInfo{}}, - tombstoneStores: &pdapi.StoresInfo{Count: 0, Stores: []*pdapi.StoreInfo{}}, errWhenUpdateStatefulSet: false, errWhenUpdateTiKVPeerService: false, errWhenGetStores: true, @@ -395,8 +378,6 @@ func TestTiKVMemberManagerSyncUpdate(t *testing.T) { tc.Spec.TiKV.SeparateRocksDBLog = pointer.BoolPtr(true) tc.Spec.TiKV.SeparateRaftLog = pointer.BoolPtr(true) }, - pdStores: &pdapi.StoresInfo{Count: 0, Stores: []*pdapi.StoreInfo{}}, - tombstoneStores: &pdapi.StoresInfo{Count: 0, Stores: []*pdapi.StoreInfo{}}, errWhenUpdateStatefulSet: false, errWhenUpdateTiKVPeerService: false, errWhenGetStores: false, @@ -415,8 +396,6 @@ func TestTiKVMemberManagerSyncUpdate(t *testing.T) { tc.Spec.TiKV.ServiceAccount = "test_new_account" tc.Status.TiKV.VolReplaceInProgress = true }, - pdStores: &pdapi.StoresInfo{Count: 0, Stores: 
[]*pdapi.StoreInfo{}}, - tombstoneStores: &pdapi.StoresInfo{Count: 0, Stores: []*pdapi.StoreInfo{}}, errWhenUpdateStatefulSet: false, errWhenUpdateTiKVPeerService: false, errWhenGetStores: false, @@ -544,7 +523,7 @@ func TestTiKVMemberManagerSetStoreLabelsForTiKV(t *testing.T) { errWhenGetStores bool hasNode bool hasPod bool - storeInfo *pdapi.StoresInfo + storeInfo *pd.StoresInfo errExpectFn func(*GomegaWithT, error) setCount int labelSetFailed bool @@ -554,8 +533,8 @@ func TestTiKVMemberManagerSetStoreLabelsForTiKV(t *testing.T) { tc.Status.TiKV.BootStrapped = true pmm, _, _, pdClient, podIndexer, nodeIndexer := newFakeTiKVMemberManager(tc) pdClient.AddReaction(pdapi.GetConfigActionType, func(action *pdapi.Action) (interface{}, error) { - return &pdapi.PDConfigFromAPI{ - Replication: &pdapi.PDReplicationConfig{ + return &pd.ServerConfig{ + Replication: pd.ReplicationConfig{ LocationLabels: []string{"region", "zone", "rack", "host"}, }, }, nil @@ -629,11 +608,8 @@ func TestTiKVMemberManagerSetStoreLabelsForTiKV(t *testing.T) { { name: "stores is empty", errWhenGetStores: false, - storeInfo: &pdapi.StoresInfo{ - Stores: []*pdapi.StoreInfo{}, - }, - hasNode: true, - hasPod: true, + hasNode: true, + hasPod: true, errExpectFn: func(g *GomegaWithT, err error) { g.Expect(err).NotTo(HaveOccurred()) }, @@ -643,15 +619,8 @@ func TestTiKVMemberManagerSetStoreLabelsForTiKV(t *testing.T) { { name: "status is nil", errWhenGetStores: false, - storeInfo: &pdapi.StoresInfo{ - Stores: []*pdapi.StoreInfo{ - { - Status: nil, - }, - }, - }, - hasNode: true, - hasPod: true, + hasNode: true, + hasPod: true, errExpectFn: func(g *GomegaWithT, err error) { g.Expect(err).NotTo(HaveOccurred()) }, @@ -661,15 +630,8 @@ func TestTiKVMemberManagerSetStoreLabelsForTiKV(t *testing.T) { { name: "store is nil", errWhenGetStores: false, - storeInfo: &pdapi.StoresInfo{ - Stores: []*pdapi.StoreInfo{ - { - Store: nil, - }, - }, - }, - hasNode: true, - hasPod: true, + hasNode: true, + hasPod: true, errExpectFn: func(g *GomegaWithT, err error) { g.Expect(err).NotTo(HaveOccurred()) }, @@ -679,17 +641,15 @@ func TestTiKVMemberManagerSetStoreLabelsForTiKV(t *testing.T) { { name: "don't have pod", errWhenGetStores: false, - storeInfo: &pdapi.StoresInfo{ - Stores: []*pdapi.StoreInfo{ + storeInfo: &pd.StoresInfo{ + Stores: []pd.StoreInfo{ { - Store: &pdapi.MetaStore{ - Store: &metapb.Store{ - Id: 333, - Address: fmt.Sprintf("%s-tikv-1.%s-tikv-peer.%s.svc:20160", "test", "test", "default"), - }, + Store: pd.MetaStore{ + ID: 333, + Address: fmt.Sprintf("%s-tikv-1.%s-tikv-peer.%s.svc:20160", "test", "test", "default"), StateName: "Up", }, - Status: &pdapi.StoreStatus{ + Status: pd.StoreStatus{ LeaderCount: 1, LastHeartbeatTS: time.Now(), }, @@ -708,17 +668,15 @@ func TestTiKVMemberManagerSetStoreLabelsForTiKV(t *testing.T) { { name: "don't have node", errWhenGetStores: false, - storeInfo: &pdapi.StoresInfo{ - Stores: []*pdapi.StoreInfo{ + storeInfo: &pd.StoresInfo{ + Stores: []pd.StoreInfo{ { - Store: &pdapi.MetaStore{ - Store: &metapb.Store{ - Id: 333, - Address: fmt.Sprintf("%s-tikv-1.%s-tikv-peer.%s.svc:20160", "test", "test", "default"), - }, + Store: pd.MetaStore{ + ID: 333, + Address: fmt.Sprintf("%s-tikv-1.%s-tikv-peer.%s.svc:20160", "test", "test", "default"), StateName: "Up", }, - Status: &pdapi.StoreStatus{ + Status: pd.StoreStatus{ LeaderCount: 1, LastHeartbeatTS: time.Now(), }, @@ -736,35 +694,33 @@ func TestTiKVMemberManagerSetStoreLabelsForTiKV(t *testing.T) { { name: "already has labels", errWhenGetStores: false, - 
storeInfo: &pdapi.StoresInfo{ - Stores: []*pdapi.StoreInfo{ + storeInfo: &pd.StoresInfo{ + Stores: []pd.StoreInfo{ { - Store: &pdapi.MetaStore{ - Store: &metapb.Store{ - Id: 333, - Address: fmt.Sprintf("%s-tikv-1.%s-tikv-peer.%s.svc:20160", "test", "test", "default"), - Labels: []*metapb.StoreLabel{ - { - Key: "region", - Value: "region", - }, - { - Key: "zone", - Value: "zone", - }, - { - Key: "rack", - Value: "rack", - }, - { - Key: "host", - Value: "host", - }, + Store: pd.MetaStore{ + ID: 333, + Address: fmt.Sprintf("%s-tikv-1.%s-tikv-peer.%s.svc:20160", "test", "test", "default"), + Labels: []pd.StoreLabel{ + { + Key: "region", + Value: "region", + }, + { + Key: "zone", + Value: "zone", + }, + { + Key: "rack", + Value: "rack", + }, + { + Key: "host", + Value: "host", }, }, StateName: "Up", }, - Status: &pdapi.StoreStatus{ + Status: pd.StoreStatus{ LeaderCount: 1, LastHeartbeatTS: time.Now(), }, @@ -782,23 +738,21 @@ func TestTiKVMemberManagerSetStoreLabelsForTiKV(t *testing.T) { { name: "labels not equal, but set failed", errWhenGetStores: false, - storeInfo: &pdapi.StoresInfo{ - Stores: []*pdapi.StoreInfo{ + storeInfo: &pd.StoresInfo{ + Stores: []pd.StoreInfo{ { - Store: &pdapi.MetaStore{ - Store: &metapb.Store{ - Id: 333, - Address: fmt.Sprintf("%s-tikv-1.%s-tikv-peer.%s.svc:20160", "test", "test", "default"), - Labels: []*metapb.StoreLabel{ - { - Key: "region", - Value: "region", - }, + Store: pd.MetaStore{ + ID: 333, + Address: fmt.Sprintf("%s-tikv-1.%s-tikv-peer.%s.svc:20160", "test", "test", "default"), + Labels: []pd.StoreLabel{ + { + Key: "region", + Value: "region", }, }, StateName: "Up", }, - Status: &pdapi.StoreStatus{ + Status: pd.StoreStatus{ LeaderCount: 1, LastHeartbeatTS: time.Now(), }, @@ -816,23 +770,21 @@ func TestTiKVMemberManagerSetStoreLabelsForTiKV(t *testing.T) { { name: "labels not equal, set success", errWhenGetStores: false, - storeInfo: &pdapi.StoresInfo{ - Stores: []*pdapi.StoreInfo{ + storeInfo: &pd.StoresInfo{ + Stores: []pd.StoreInfo{ { - Store: &pdapi.MetaStore{ - Store: &metapb.Store{ - Id: 333, - Address: fmt.Sprintf("%s-tikv-1.%s-tikv-peer.%s.svc:20160", "test", "test", "default"), - Labels: []*metapb.StoreLabel{ - { - Key: "region", - Value: "region", - }, + Store: pd.MetaStore{ + ID: 333, + Address: fmt.Sprintf("%s-tikv-1.%s-tikv-peer.%s.svc:20160", "test", "test", "default"), + Labels: []pd.StoreLabel{ + { + Key: "region", + Value: "region", }, }, StateName: "Up", }, - Status: &pdapi.StoreStatus{ + Status: pd.StoreStatus{ LeaderCount: 1, LastHeartbeatTS: time.Now(), }, @@ -862,9 +814,9 @@ func TestTiKVMemberManagerSyncTidbClusterStatus(t *testing.T) { updateTC func(*v1alpha1.TidbCluster) upgradingFn func(corelisters.PodLister, pdapi.PDControlInterface, *apps.StatefulSet, *v1alpha1.TidbCluster) (bool, error) errWhenGetStores bool - storeInfo *pdapi.StoresInfo + storeInfo *pd.StoresInfo errWhenGetTombstoneStores bool - tombstoneStoreInfo *pdapi.StoresInfo + tombstoneStoreInfo *pd.StoresInfo errExpectFn func(*GomegaWithT, error) tcExpectFn func(*GomegaWithT, *v1alpha1.TidbCluster) } @@ -1071,15 +1023,9 @@ func TestTiKVMemberManagerSyncTidbClusterStatus(t *testing.T) { upgradingFn: func(lister corelisters.PodLister, controlInterface pdapi.PDControlInterface, set *apps.StatefulSet, cluster *v1alpha1.TidbCluster) (bool, error) { return false, nil }, - errWhenGetStores: false, - storeInfo: &pdapi.StoresInfo{ - Stores: []*pdapi.StoreInfo{}, - }, + errWhenGetStores: false, errWhenGetTombstoneStores: false, - tombstoneStoreInfo: &pdapi.StoresInfo{ - 
Stores: []*pdapi.StoreInfo{}, - }, - errExpectFn: errExpectNil, + errExpectFn: errExpectNil, tcExpectFn: func(g *GomegaWithT, tc *v1alpha1.TidbCluster) { g.Expect(len(tc.Status.TiKV.Stores)).To(Equal(0)) g.Expect(len(tc.Status.TiKV.TombstoneStores)).To(Equal(0)) @@ -1092,19 +1038,9 @@ func TestTiKVMemberManagerSyncTidbClusterStatus(t *testing.T) { upgradingFn: func(lister corelisters.PodLister, controlInterface pdapi.PDControlInterface, set *apps.StatefulSet, cluster *v1alpha1.TidbCluster) (bool, error) { return false, nil }, - errWhenGetStores: false, - storeInfo: &pdapi.StoresInfo{ - Stores: []*pdapi.StoreInfo{ - { - Store: nil, - }, - }, - }, + errWhenGetStores: false, errWhenGetTombstoneStores: false, - tombstoneStoreInfo: &pdapi.StoresInfo{ - Stores: []*pdapi.StoreInfo{}, - }, - errExpectFn: errExpectNil, + errExpectFn: errExpectNil, tcExpectFn: func(g *GomegaWithT, tc *v1alpha1.TidbCluster) { g.Expect(len(tc.Status.TiKV.Stores)).To(Equal(0)) g.Expect(len(tc.Status.TiKV.TombstoneStores)).To(Equal(0)) @@ -1117,19 +1053,9 @@ func TestTiKVMemberManagerSyncTidbClusterStatus(t *testing.T) { upgradingFn: func(lister corelisters.PodLister, controlInterface pdapi.PDControlInterface, set *apps.StatefulSet, cluster *v1alpha1.TidbCluster) (bool, error) { return false, nil }, - errWhenGetStores: false, - storeInfo: &pdapi.StoresInfo{ - Stores: []*pdapi.StoreInfo{ - { - Status: nil, - }, - }, - }, + errWhenGetStores: false, errWhenGetTombstoneStores: false, - tombstoneStoreInfo: &pdapi.StoresInfo{ - Stores: []*pdapi.StoreInfo{}, - }, - errExpectFn: errExpectNil, + errExpectFn: errExpectNil, tcExpectFn: func(g *GomegaWithT, tc *v1alpha1.TidbCluster) { g.Expect(len(tc.Status.TiKV.Stores)).To(Equal(0)) g.Expect(len(tc.Status.TiKV.TombstoneStores)).To(Equal(0)) @@ -1147,37 +1073,30 @@ func TestTiKVMemberManagerSyncTidbClusterStatus(t *testing.T) { return false, nil }, errWhenGetStores: false, - storeInfo: &pdapi.StoresInfo{ - Stores: []*pdapi.StoreInfo{ + storeInfo: &pd.StoresInfo{ + Stores: []pd.StoreInfo{ { - Store: &pdapi.MetaStore{ - Store: &metapb.Store{ - Id: 333, - Address: fmt.Sprintf("%s-tikv-1.%s-tikv-peer.%s.svc:20160", "test", "test", "default"), - }, + Store: pd.MetaStore{ + ID: 333, + Address: fmt.Sprintf("%s-tikv-1.%s-tikv-peer.%s.svc:20160", "test", "test", "default"), }, - Status: &pdapi.StoreStatus{ + Status: pd.StoreStatus{ LastHeartbeatTS: time.Time{}, }, }, { - Store: &pdapi.MetaStore{ - Store: &metapb.Store{ - Id: 330, - Address: fmt.Sprintf("%s-tiflash-1.%s-tiflash-peer.%s.svc:20160", "test", "test", "default"), - }, + Store: pd.MetaStore{ + ID: 330, + Address: fmt.Sprintf("%s-tiflash-1.%s-tiflash-peer.%s.svc:20160", "test", "test", "default"), }, - Status: &pdapi.StoreStatus{ + Status: pd.StoreStatus{ LastHeartbeatTS: time.Time{}, }, }, }, }, errWhenGetTombstoneStores: false, - tombstoneStoreInfo: &pdapi.StoresInfo{ - Stores: []*pdapi.StoreInfo{}, - }, - errExpectFn: errExpectNil, + errExpectFn: errExpectNil, tcExpectFn: func(g *GomegaWithT, tc *v1alpha1.TidbCluster) { g.Expect(time.Time{}.IsZero()).To(BeTrue()) g.Expect(len(tc.Status.TiKV.Stores)).To(Equal(1)) @@ -1195,37 +1114,30 @@ func TestTiKVMemberManagerSyncTidbClusterStatus(t *testing.T) { return false, nil }, errWhenGetStores: false, - storeInfo: &pdapi.StoresInfo{ - Stores: []*pdapi.StoreInfo{ + storeInfo: &pd.StoresInfo{ + Stores: []pd.StoreInfo{ { - Store: &pdapi.MetaStore{ - Store: &metapb.Store{ - Id: 333, - Address: fmt.Sprintf("%s-tikv-1.%s-tikv-peer.%s.svc:20160", "test", "test", "default"), - }, + Store: 
pd.MetaStore{ + ID: 333, + Address: fmt.Sprintf("%s-tikv-1.%s-tikv-peer.%s.svc:20160", "test", "test", "default"), }, - Status: &pdapi.StoreStatus{ + Status: pd.StoreStatus{ LastHeartbeatTS: time.Time{}, }, }, { - Store: &pdapi.MetaStore{ - Store: &metapb.Store{ - Id: 330, - Address: fmt.Sprintf("%s-tiflash-1.%s-tiflash-peer.%s.svc:20160", "test", "test", "default"), - }, + Store: pd.MetaStore{ + ID: 330, + Address: fmt.Sprintf("%s-tiflash-1.%s-tiflash-peer.%s.svc:20160", "test", "test", "default"), }, - Status: &pdapi.StoreStatus{ + Status: pd.StoreStatus{ LastHeartbeatTS: time.Time{}, }, }, }, }, errWhenGetTombstoneStores: false, - tombstoneStoreInfo: &pdapi.StoresInfo{ - Stores: []*pdapi.StoreInfo{}, - }, - errExpectFn: errExpectNil, + errExpectFn: errExpectNil, tcExpectFn: func(g *GomegaWithT, tc *v1alpha1.TidbCluster) { g.Expect(time.Time{}.IsZero()).To(BeTrue()) g.Expect(len(tc.Status.TiKV.Stores)).To(Equal(1)) @@ -1243,37 +1155,30 @@ func TestTiKVMemberManagerSyncTidbClusterStatus(t *testing.T) { return false, nil }, errWhenGetStores: false, - storeInfo: &pdapi.StoresInfo{ - Stores: []*pdapi.StoreInfo{ + storeInfo: &pd.StoresInfo{ + Stores: []pd.StoreInfo{ { - Store: &pdapi.MetaStore{ - Store: &metapb.Store{ - Id: 333, - Address: fmt.Sprintf("%s-tikv-1.%s-tikv-peer.%s.svc:20160", "test", "test", "default"), - }, + Store: pd.MetaStore{ + ID: 333, + Address: fmt.Sprintf("%s-tikv-1.%s-tikv-peer.%s.svc:20160", "test", "test", "default"), }, - Status: &pdapi.StoreStatus{ + Status: pd.StoreStatus{ LastHeartbeatTS: time.Now(), }, }, { - Store: &pdapi.MetaStore{ - Store: &metapb.Store{ - Id: 330, - Address: fmt.Sprintf("%s-tiflash-1.%s-tiflash-peer.%s.svc:20160", "test", "test", "default"), - }, + Store: pd.MetaStore{ + ID: 330, + Address: fmt.Sprintf("%s-tiflash-1.%s-tiflash-peer.%s.svc:20160", "test", "test", "default"), }, - Status: &pdapi.StoreStatus{ + Status: pd.StoreStatus{ LastHeartbeatTS: time.Time{}, }, }, }, }, errWhenGetTombstoneStores: false, - tombstoneStoreInfo: &pdapi.StoresInfo{ - Stores: []*pdapi.StoreInfo{}, - }, - errExpectFn: errExpectNil, + errExpectFn: errExpectNil, tcExpectFn: func(g *GomegaWithT, tc *v1alpha1.TidbCluster) { g.Expect(time.Time{}.IsZero()).To(BeTrue()) g.Expect(len(tc.Status.TiKV.Stores)).To(Equal(1)) @@ -1291,37 +1196,30 @@ func TestTiKVMemberManagerSyncTidbClusterStatus(t *testing.T) { return false, nil }, errWhenGetStores: false, - storeInfo: &pdapi.StoresInfo{ - Stores: []*pdapi.StoreInfo{ + storeInfo: &pd.StoresInfo{ + Stores: []pd.StoreInfo{ { - Store: &pdapi.MetaStore{ - Store: &metapb.Store{ - Id: 333, - Address: fmt.Sprintf("%s-tikv-1.%s-tikv-peer.%s.svc:20160", "test", "test", "default"), - }, + Store: pd.MetaStore{ + ID: 333, + Address: fmt.Sprintf("%s-tikv-1.%s-tikv-peer.%s.svc:20160", "test", "test", "default"), }, - Status: &pdapi.StoreStatus{ + Status: pd.StoreStatus{ LastHeartbeatTS: time.Now(), }, }, { - Store: &pdapi.MetaStore{ - Store: &metapb.Store{ - Id: 330, - Address: fmt.Sprintf("%s-tiflash-1.%s-tiflash-peer.%s.svc:20160", "test", "test", "default"), - }, + Store: pd.MetaStore{ + ID: 330, + Address: fmt.Sprintf("%s-tiflash-1.%s-tiflash-peer.%s.svc:20160", "test", "test", "default"), }, - Status: &pdapi.StoreStatus{ + Status: pd.StoreStatus{ LastHeartbeatTS: time.Time{}, }, }, }, }, errWhenGetTombstoneStores: false, - tombstoneStoreInfo: &pdapi.StoresInfo{ - Stores: []*pdapi.StoreInfo{}, - }, - errExpectFn: errExpectNil, + errExpectFn: errExpectNil, tcExpectFn: func(g *GomegaWithT, tc *v1alpha1.TidbCluster) { 
g.Expect(len(tc.Status.TiKV.Stores)).To(Equal(1)) g.Expect(tc.Status.TiKV.Stores["333"].LastTransitionTime.Time.IsZero()).To(BeFalse()) @@ -1342,38 +1240,31 @@ func TestTiKVMemberManagerSyncTidbClusterStatus(t *testing.T) { return false, nil }, errWhenGetStores: false, - storeInfo: &pdapi.StoresInfo{ - Stores: []*pdapi.StoreInfo{ + storeInfo: &pd.StoresInfo{ + Stores: []pd.StoreInfo{ { - Store: &pdapi.MetaStore{ - Store: &metapb.Store{ - Id: 333, - Address: fmt.Sprintf("%s-tikv-1.%s-tikv-peer.%s.svc:20160", "test", "test", "default"), - }, + Store: pd.MetaStore{ + ID: 333, + Address: fmt.Sprintf("%s-tikv-1.%s-tikv-peer.%s.svc:20160", "test", "test", "default"), StateName: "Up", }, - Status: &pdapi.StoreStatus{ + Status: pd.StoreStatus{ LastHeartbeatTS: time.Now(), }, }, { - Store: &pdapi.MetaStore{ - Store: &metapb.Store{ - Id: 330, - Address: fmt.Sprintf("%s-tiflash-1.%s-tiflash-peer.%s.svc:20160", "test", "test", "default"), - }, + Store: pd.MetaStore{ + ID: 330, + Address: fmt.Sprintf("%s-tiflash-1.%s-tiflash-peer.%s.svc:20160", "test", "test", "default"), }, - Status: &pdapi.StoreStatus{ + Status: pd.StoreStatus{ LastHeartbeatTS: time.Time{}, }, }, }, }, errWhenGetTombstoneStores: false, - tombstoneStoreInfo: &pdapi.StoresInfo{ - Stores: []*pdapi.StoreInfo{}, - }, - errExpectFn: errExpectNil, + errExpectFn: errExpectNil, tcExpectFn: func(g *GomegaWithT, tc *v1alpha1.TidbCluster) { g.Expect(len(tc.Status.TiKV.Stores)).To(Equal(1)) g.Expect(tc.Status.TiKV.Stores["333"].LastTransitionTime).To(Equal(now)) @@ -1394,38 +1285,31 @@ func TestTiKVMemberManagerSyncTidbClusterStatus(t *testing.T) { return false, nil }, errWhenGetStores: false, - storeInfo: &pdapi.StoresInfo{ - Stores: []*pdapi.StoreInfo{ + storeInfo: &pd.StoresInfo{ + Stores: []pd.StoreInfo{ { - Store: &pdapi.MetaStore{ - Store: &metapb.Store{ - Id: 333, - Address: fmt.Sprintf("%s-tikv-1.%s-tikv-peer.%s.svc:20160", "test", "test", "default"), - }, + Store: pd.MetaStore{ + ID: 333, + Address: fmt.Sprintf("%s-tikv-1.%s-tikv-peer.%s.svc:20160", "test", "test", "default"), StateName: "Down", }, - Status: &pdapi.StoreStatus{ + Status: pd.StoreStatus{ LastHeartbeatTS: time.Now(), }, }, { - Store: &pdapi.MetaStore{ - Store: &metapb.Store{ - Id: 330, - Address: fmt.Sprintf("%s-tiflash-1.%s-tiflash-peer.%s.svc:20160", "test", "test", "default"), - }, + Store: pd.MetaStore{ + ID: 330, + Address: fmt.Sprintf("%s-tiflash-1.%s-tiflash-peer.%s.svc:20160", "test", "test", "default"), }, - Status: &pdapi.StoreStatus{ + Status: pd.StoreStatus{ LastHeartbeatTS: time.Time{}, }, }, }, }, errWhenGetTombstoneStores: false, - tombstoneStoreInfo: &pdapi.StoresInfo{ - Stores: []*pdapi.StoreInfo{}, - }, - errExpectFn: errExpectNil, + errExpectFn: errExpectNil, tcExpectFn: func(g *GomegaWithT, tc *v1alpha1.TidbCluster) { g.Expect(len(tc.Status.TiKV.Stores)).To(Equal(1)) g.Expect(tc.Status.TiKV.Stores["333"].LastTransitionTime).NotTo(Equal(now)) @@ -1446,37 +1330,30 @@ func TestTiKVMemberManagerSyncTidbClusterStatus(t *testing.T) { return false, nil }, errWhenGetStores: false, - storeInfo: &pdapi.StoresInfo{ - Stores: []*pdapi.StoreInfo{ + storeInfo: &pd.StoresInfo{ + Stores: []pd.StoreInfo{ { - Store: &pdapi.MetaStore{ - Store: &metapb.Store{ - Id: 333, - Address: fmt.Sprintf("%s-tikv-1.%s-tikv-peer.%s.svc:20160", "test", "test", "default"), - }, + Store: pd.MetaStore{ + ID: 333, + Address: fmt.Sprintf("%s-tikv-1.%s-tikv-peer.%s.svc:20160", "test", "test", "default"), StateName: "Up", }, - Status: &pdapi.StoreStatus{ + Status: pd.StoreStatus{ 
LastHeartbeatTS: time.Now(), }, }, { - Store: &pdapi.MetaStore{ - Store: &metapb.Store{ - Id: 330, - Address: fmt.Sprintf("%s-tiflash-1.%s-tiflash-peer.%s.svc:20160", "test", "test", "default"), - }, + Store: pd.MetaStore{ + ID: 330, + Address: fmt.Sprintf("%s-tiflash-1.%s-tiflash-peer.%s.svc:20160", "test", "test", "default"), }, - Status: &pdapi.StoreStatus{ + Status: pd.StoreStatus{ LastHeartbeatTS: time.Time{}, }, }, }, }, errWhenGetTombstoneStores: true, - tombstoneStoreInfo: &pdapi.StoresInfo{ - Stores: []*pdapi.StoreInfo{}, - }, errExpectFn: func(g *GomegaWithT, err error) { g.Expect(err).To(HaveOccurred()) g.Expect(strings.Contains(err.Error(), "failed to get tombstone stores")).To(BeTrue()) @@ -1494,57 +1371,49 @@ func TestTiKVMemberManagerSyncTidbClusterStatus(t *testing.T) { return false, nil }, errWhenGetStores: false, - storeInfo: &pdapi.StoresInfo{ - Stores: []*pdapi.StoreInfo{ + storeInfo: &pd.StoresInfo{ + Stores: []pd.StoreInfo{ { - Store: &pdapi.MetaStore{ - Store: &metapb.Store{ - Id: 333, - Address: fmt.Sprintf("%s-tikv-1.%s-tikv-peer.%s.svc:20160", "test", "test", "default"), - }, + Store: pd.MetaStore{ + ID: 333, + Address: fmt.Sprintf("%s-tikv-1.%s-tikv-peer.%s.svc:20160", "test", "test", "default"), StateName: "Up", }, - Status: &pdapi.StoreStatus{ + Status: pd.StoreStatus{ LastHeartbeatTS: time.Now(), }, }, { - Store: &pdapi.MetaStore{ - Store: &metapb.Store{ - Id: 330, - Address: fmt.Sprintf("%s-tiflash-1.%s-tiflash-peer.%s.svc:20160", "test", "test", "default"), - }, + Store: pd.MetaStore{ + ID: 330, + Address: fmt.Sprintf("%s-tiflash-1.%s-tiflash-peer.%s.svc:20160", "test", "test", "default"), }, - Status: &pdapi.StoreStatus{ + Status: pd.StoreStatus{ LastHeartbeatTS: time.Time{}, }, }, }, }, errWhenGetTombstoneStores: false, - tombstoneStoreInfo: &pdapi.StoresInfo{ - Stores: []*pdapi.StoreInfo{ + tombstoneStoreInfo: &pd.StoresInfo{ + Stores: []pd.StoreInfo{ { - Store: &pdapi.MetaStore{ - Store: &metapb.Store{ - Id: 333, - Address: fmt.Sprintf("%s-tikv-1.%s-tikv-peer.%s.svc:20160", "test", "test", "default"), - }, + Store: pd.MetaStore{ + ID: 333, + Address: fmt.Sprintf("%s-tikv-1.%s-tikv-peer.%s.svc:20160", "test", "test", "default"), StateName: "Tombstone", }, - Status: &pdapi.StoreStatus{ + Status: pd.StoreStatus{ LastHeartbeatTS: time.Now(), }, }, { - Store: &pdapi.MetaStore{ - Store: &metapb.Store{ - Id: 330, - Address: fmt.Sprintf("%s-tiflash-1.%s-tiflash-peer.%s.svc:20160", "test", "test", "default"), - }, + Store: pd.MetaStore{ + ID: 330, + Address: fmt.Sprintf("%s-tiflash-1.%s-tiflash-peer.%s.svc:20160", "test", "test", "default"), StateName: "Tombstone", }, - Status: &pdapi.StoreStatus{ + Status: pd.StoreStatus{ LastHeartbeatTS: time.Now(), }, }, @@ -1566,57 +1435,48 @@ func TestTiKVMemberManagerSyncTidbClusterStatus(t *testing.T) { return false, nil }, errWhenGetStores: false, - storeInfo: &pdapi.StoresInfo{ - Stores: []*pdapi.StoreInfo{ + storeInfo: &pd.StoresInfo{ + Stores: []pd.StoreInfo{ { - Store: &pdapi.MetaStore{ - Store: &metapb.Store{ - Id: 333, - Address: fmt.Sprintf("%s-tikv-1.%s-tikv-peer.%s.svc.cluster1.com:20160", "test", "test", "default"), - }, + Store: pd.MetaStore{ + ID: 333, + Address: fmt.Sprintf("%s-tikv-1.%s-tikv-peer.%s.svc.cluster1.com:20160", "test", "test", "default"), StateName: "Up", }, - Status: &pdapi.StoreStatus{ + Status: pd.StoreStatus{ LastHeartbeatTS: time.Now(), }, }, { - Store: &pdapi.MetaStore{ - Store: &metapb.Store{ - Id: 334, - Address: fmt.Sprintf("%s-tikv-1.%s-tikv-peer.%s.svc.cluster2.com:20160", "test", 
"test", "default"), - }, + Store: pd.MetaStore{ + ID: 334, + Address: fmt.Sprintf("%s-tikv-1.%s-tikv-peer.%s.svc.cluster2.com:20160", "test", "test", "default"), StateName: "Up", }, - Status: &pdapi.StoreStatus{ + Status: pd.StoreStatus{ LastHeartbeatTS: time.Now(), }, }, { - Store: &pdapi.MetaStore{ - Store: &metapb.Store{ - Id: 330, - Address: fmt.Sprintf("%s-tiflash-1.%s-tiflash-peer.%s.svc.cluster1.com:20160", "test", "test", "default"), - Labels: []*metapb.StoreLabel{ - { - Key: "engine", - Value: "tiflash", - }, + Store: pd.MetaStore{ + ID: 330, + Address: fmt.Sprintf("%s-tiflash-1.%s-tiflash-peer.%s.svc.cluster1.com:20160", "test", "test", "default"), + Labels: []pd.StoreLabel{ + { + Key: "engine", + Value: "tiflash", }, }, StateName: "Up", }, - Status: &pdapi.StoreStatus{ + Status: pd.StoreStatus{ LastHeartbeatTS: time.Now(), }, }, }, }, errWhenGetTombstoneStores: false, - tombstoneStoreInfo: &pdapi.StoresInfo{ - Stores: []*pdapi.StoreInfo{}, - }, - errExpectFn: errExpectNil, + errExpectFn: errExpectNil, tcExpectFn: func(g *GomegaWithT, tc *v1alpha1.TidbCluster) { g.Expect(len(tc.Status.TiKV.Stores)).To(Equal(1)) g.Expect(len(tc.Status.TiKV.PeerStores)).To(Equal(1)) diff --git a/pkg/manager/member/tikv_scaler.go b/pkg/manager/member/tikv_scaler.go index 9c35a14e0ee..e3212376681 100644 --- a/pkg/manager/member/tikv_scaler.go +++ b/pkg/manager/member/tikv_scaler.go @@ -14,6 +14,7 @@ package member import ( + "context" "fmt" "strconv" "time" @@ -127,19 +128,20 @@ func (s *tikvScaler) ScaleIn(meta metav1.Object, oldSet *apps.StatefulSet, newSe skipPreCheck = true } else { var err error + ctx := context.TODO() pdClient := controller.GetPDClient(s.deps.PDControl, tc) - storesInfo, err := pdClient.GetStores() + storesInfo, err := pdClient.GetStores(ctx) if err != nil { return fmt.Errorf("failed to get stores info in TidbCluster %s/%s", tc.GetNamespace(), tc.GetName()) } - config, err := pdClient.GetConfig() + config, err := pdClient.GetConfig(ctx) if err != nil { return fmt.Errorf("failed to get config in TidbCluster %s/%s", tc.GetNamespace(), tc.GetName()) } - maxReplicas = int(*(config.Replication.MaxReplicas)) + maxReplicas = int(config.Replication.MaxReplicas) // filter out TiFlash for _, store := range storesInfo.Stores { - if store.Store != nil && store.Store.StateName == v1alpha1.TiKVStateUp && util.MatchLabelFromStoreLabels(store.Store.Labels, label.TiKVLabelVal) { + if store.Store.StateName == v1alpha1.TiKVStateUp && util.MatchLabelFromStoreLabels(store.Store.Labels, label.TiKVLabelVal) { upTikvStoreCount++ } } @@ -207,7 +209,7 @@ func (s *tikvScaler) scaleInOne(tc *v1alpha1.TidbCluster, skipPreCheck bool, upT return deletedUpStore, err } if state != v1alpha1.TiKVStateOffline { - if err := controller.GetPDClient(s.deps.PDControl, tc).DeleteStore(id); err != nil { + if err := controller.GetPDClient(s.deps.PDControl, tc).DeleteStore(context.TODO(), id); err != nil { klog.Errorf("tikvScaler.ScaleIn: failed to delete store %d, %v", id, err) return deletedUpStore, err } diff --git a/pkg/manager/member/tikv_scaler_test.go b/pkg/manager/member/tikv_scaler_test.go index ac524f7b8ea..77f1586ead0 100644 --- a/pkg/manager/member/tikv_scaler_test.go +++ b/pkg/manager/member/tikv_scaler_test.go @@ -22,12 +22,13 @@ import ( . 
"github.com/onsi/gomega" "github.com/pingcap/advanced-statefulset/client/apis/apps/v1/helper" perrors "github.com/pingcap/errors" - "github.com/pingcap/kvproto/pkg/metapb" "github.com/pingcap/tidb-operator/pkg/apis/label" "github.com/pingcap/tidb-operator/pkg/apis/pingcap/v1alpha1" "github.com/pingcap/tidb-operator/pkg/controller" "github.com/pingcap/tidb-operator/pkg/features" "github.com/pingcap/tidb-operator/pkg/pdapi" + pd "github.com/tikv/pd/client/http" + apps "k8s.io/api/apps/v1" corev1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/api/errors" @@ -625,26 +626,24 @@ func TestTiKVScalerScaleIn(t *testing.T) { pdClient.AddReaction(pdapi.GetConfigActionType, func(action *pdapi.Action) (interface{}, error) { var replicas uint64 = 3 - return &pdapi.PDConfigFromAPI{ - Replication: &pdapi.PDReplicationConfig{ - MaxReplicas: &replicas, + return &pd.ServerConfig{ + Replication: pd.ReplicationConfig{ + MaxReplicas: replicas, }, }, nil }) if test.getStoresFn == nil { test.getStoresFn = func(action *pdapi.Action) (interface{}, error) { - store := &pdapi.StoreInfo{ - Store: &pdapi.MetaStore{ + store := pd.StoreInfo{ + Store: pd.MetaStore{ StateName: v1alpha1.TiKVStateUp, - Store: &metapb.Store{ - Address: fmt.Sprintf("%s-tikv-0", "basic"), - }, + Address: fmt.Sprintf("%s-tikv-0", "basic"), }, } - return &pdapi.StoresInfo{ + return &pd.StoresInfo{ Count: 5, - Stores: []*pdapi.StoreInfo{store, store, store, store, store}, + Stores: []pd.StoreInfo{store, store, store, store, store}, }, nil } } @@ -886,17 +885,15 @@ func TestTiKVScalerScaleIn(t *testing.T) { errExpectFn: errExpectNotNil, changed: false, getStoresFn: func(action *pdapi.Action) (interface{}, error) { - store := &pdapi.StoreInfo{ - Store: &pdapi.MetaStore{ + store := pd.StoreInfo{ + Store: pd.MetaStore{ StateName: v1alpha1.TiKVStateUp, - Store: &metapb.Store{ - Address: fmt.Sprintf("%s-tikv-0", "basic"), - }, + Address: fmt.Sprintf("%s-tikv-0", "basic"), }, } - return &pdapi.StoresInfo{ + return &pd.StoresInfo{ Count: 3, - Stores: []*pdapi.StoreInfo{store, store, store}, + Stores: []pd.StoreInfo{store, store, store}, }, nil }, }, @@ -913,31 +910,27 @@ func TestTiKVScalerScaleIn(t *testing.T) { errExpectFn: errExpectNotNil, changed: false, getStoresFn: func(action *pdapi.Action) (interface{}, error) { - store := &pdapi.StoreInfo{ - Store: &pdapi.MetaStore{ + store := pd.StoreInfo{ + Store: pd.MetaStore{ StateName: v1alpha1.TiKVStateUp, - Store: &metapb.Store{ - Address: fmt.Sprintf("%s-tikv-0", "basic"), - }, + Address: fmt.Sprintf("%s-tikv-0", "basic"), }, } - tiflashstore := &pdapi.StoreInfo{ - Store: &pdapi.MetaStore{ + tiflashStore := pd.StoreInfo{ + Store: pd.MetaStore{ StateName: v1alpha1.TiKVStateUp, - Store: &metapb.Store{ - Address: fmt.Sprintf("%s-tiflash-0", "basic"), - Labels: []*metapb.StoreLabel{ - { - Key: "engine", - Value: "tiflash", - }, + Address: fmt.Sprintf("%s-tiflash-0", "basic"), + Labels: []pd.StoreLabel{ + { + Key: "engine", + Value: "tiflash", }, }, }, } - return &pdapi.StoresInfo{ + return pd.StoresInfo{ Count: 4, - Stores: []*pdapi.StoreInfo{store, store, store, tiflashstore}, + Stores: []pd.StoreInfo{store, store, store, tiflashStore}, }, nil }, }, @@ -1084,26 +1077,24 @@ func TestTiKVScalerScaleInSimultaneously(t *testing.T) { pdClient.AddReaction(pdapi.GetConfigActionType, func(action *pdapi.Action) (interface{}, error) { var replicas uint64 = 3 - return &pdapi.PDConfigFromAPI{ - Replication: &pdapi.PDReplicationConfig{ - MaxReplicas: &replicas, + return &pd.ServerConfig{ + Replication: 
pd.ReplicationConfig{ + MaxReplicas: replicas, }, }, nil }) if test.getStoresFn == nil { test.getStoresFn = func(action *pdapi.Action) (interface{}, error) { - store := &pdapi.StoreInfo{ - Store: &pdapi.MetaStore{ + store := pd.StoreInfo{ + Store: pd.MetaStore{ StateName: v1alpha1.TiKVStateUp, - Store: &metapb.Store{ - Address: fmt.Sprintf("%s-tikv-0", "basic"), - }, + Address: fmt.Sprintf("%s-tikv-0", "basic"), }, } - return &pdapi.StoresInfo{ + return pd.StoresInfo{ Count: 5, - Stores: []*pdapi.StoreInfo{store, store, store, store, store}, + Stores: []pd.StoreInfo{store, store, store, store, store}, }, nil } } @@ -1391,17 +1382,15 @@ func TestTiKVScalerScaleInSimultaneously(t *testing.T) { storeIdLabel: "13", }}, getStoresFn: func(action *pdapi.Action) (interface{}, error) { - store := &pdapi.StoreInfo{ - Store: &pdapi.MetaStore{ + store := pd.StoreInfo{ + Store: pd.MetaStore{ StateName: v1alpha1.TiKVStateUp, - Store: &metapb.Store{ - Address: fmt.Sprintf("%s-tikv-0", "basic"), - }, + Address: fmt.Sprintf("%s-tikv-0", "basic"), }, } - return &pdapi.StoresInfo{ + return pd.StoresInfo{ Count: 6, - Stores: []*pdapi.StoreInfo{store, store, store, store, store, store}, + Stores: []pd.StoreInfo{store, store, store, store, store, store}, }, nil }, scaleInParallelism: 2, @@ -1544,17 +1533,15 @@ func TestTiKVScalerScaleInSimultaneously(t *testing.T) { storeIdLabel: "13", }}, getStoresFn: func(action *pdapi.Action) (interface{}, error) { - store := &pdapi.StoreInfo{ - Store: &pdapi.MetaStore{ + store := pd.StoreInfo{ + Store: pd.MetaStore{ StateName: v1alpha1.TiKVStateUp, - Store: &metapb.Store{ - Address: fmt.Sprintf("%s-tikv-0", "basic"), - }, + Address: fmt.Sprintf("%s-tikv-0", "basic"), }, } - return &pdapi.StoresInfo{ + return pd.StoresInfo{ Count: 4, - Stores: []*pdapi.StoreInfo{store, store, store, store}, + Stores: []pd.StoreInfo{store, store, store, store}, }, nil }, scaleInParallelism: 2, @@ -1609,31 +1596,27 @@ func TestTiKVScalerScaleInSimultaneously(t *testing.T) { storeIdLabel: "13", }}, getStoresFn: func(action *pdapi.Action) (interface{}, error) { - store := &pdapi.StoreInfo{ - Store: &pdapi.MetaStore{ + store := pd.StoreInfo{ + Store: pd.MetaStore{ StateName: v1alpha1.TiKVStateUp, - Store: &metapb.Store{ - Address: fmt.Sprintf("%s-tikv-0", "basic"), - }, + Address: fmt.Sprintf("%s-tikv-0", "basic"), }, } - tiflashstore := &pdapi.StoreInfo{ - Store: &pdapi.MetaStore{ + tiflashStore := pd.StoreInfo{ + Store: pd.MetaStore{ StateName: v1alpha1.TiKVStateUp, - Store: &metapb.Store{ - Address: fmt.Sprintf("%s-tiflash-0", "basic"), - Labels: []*metapb.StoreLabel{ - { - Key: "engine", - Value: "tiflash", - }, + Address: fmt.Sprintf("%s-tiflash-0", "basic"), + Labels: []pd.StoreLabel{ + { + Key: "engine", + Value: "tiflash", }, }, }, } - return &pdapi.StoresInfo{ + return pd.StoresInfo{ Count: 5, - Stores: []*pdapi.StoreInfo{store, store, store, store, tiflashstore}, + Stores: []pd.StoreInfo{store, store, store, store, tiflashStore}, }, nil }, scaleInParallelism: 2, @@ -1747,25 +1730,23 @@ func TestTiKVScalerScaleInSimultaneouslyExtra(t *testing.T) { pdClient := controller.NewFakePDClient(pdControl, tc) pdClient.AddReaction(pdapi.GetConfigActionType, func(action *pdapi.Action) (interface{}, error) { var replicas uint64 = 3 - return &pdapi.PDConfigFromAPI{ - Replication: &pdapi.PDReplicationConfig{ - MaxReplicas: &replicas, + return &pd.ServerConfig{ + Replication: pd.ReplicationConfig{ + MaxReplicas: replicas, }, }, nil }) if test.getStoresFn == nil { test.getStoresFn = func(action 
*pdapi.Action) (interface{}, error) { - store := &pdapi.StoreInfo{ - Store: &pdapi.MetaStore{ + store := pd.StoreInfo{ + Store: pd.MetaStore{ StateName: v1alpha1.TiKVStateUp, - Store: &metapb.Store{ - Address: fmt.Sprintf("%s-tikv-0", "basic"), - }, + Address: fmt.Sprintf("%s-tikv-0", "basic"), }, } - return &pdapi.StoresInfo{ + return pd.StoresInfo{ Count: 5, - Stores: []*pdapi.StoreInfo{store, store, store, store, store}, + Stores: []pd.StoreInfo{store, store, store, store, store}, }, nil } } diff --git a/pkg/manager/member/tikv_upgrader.go b/pkg/manager/member/tikv_upgrader.go index e2ed4366596..e0bcc1777e0 100644 --- a/pkg/manager/member/tikv_upgrader.go +++ b/pkg/manager/member/tikv_upgrader.go @@ -14,6 +14,7 @@ package member import ( + "context" "fmt" "strconv" "time" @@ -362,7 +363,7 @@ func (u *tikvUpgrader) beginEvictLeader(tc *v1alpha1.TidbCluster, storeID uint64 tc.Status.TiKV.Stores[strconv.Itoa(int(storeID))] = status } - err := controller.GetPDClient(u.deps.PDControl, tc).BeginEvictLeader(storeID) + err := pdapi.BeginEvictLeader(context.TODO(), controller.GetPDClient(u.deps.PDControl, tc), storeID) if err != nil { klog.Errorf("beginEvictLeader: failed to begin evict leader: %d, %s/%s, %v", storeID, ns, podName, err) @@ -429,20 +430,19 @@ func endEvictLeaderForAllStore(deps *controller.Dependencies, tc *v1alpha1.TidbC } } - pdcli := controller.GetPDClient(deps.PDControl, tc) - - scheduelrs, err := pdcli.GetEvictLeaderSchedulersForStores(storeIDs...) + pdClient := controller.GetPDClient(deps.PDControl, tc) + schedulers, err := pdapi.GetEvictLeaderSchedulersForStores(context.TODO(), pdClient, storeIDs...) if err != nil { return fmt.Errorf("get scheduler failed: %v", err) } - if len(scheduelrs) == 0 { + if len(schedulers) == 0 { klog.Infof("tikv: no evict leader scheduler exists for %s/%s", tc.Namespace, tc.Name) return nil } errs := make([]error, 0) - for storeID := range scheduelrs { - err := pdcli.EndEvictLeader(storeID) + for storeID := range schedulers { + err := pdapi.EndEvictLeader(context.TODO(), pdClient, storeID) if err != nil { klog.Errorf("tikv: failed to end evict leader for store: %d of %s/%s, error: %v", storeID, tc.Namespace, tc.Name, err) errs = append(errs, fmt.Errorf("end evict leader for store %d failed: %v", storeID, err)) @@ -478,7 +478,7 @@ func endEvictLeaderbyStoreID(deps *controller.Dependencies, tc *v1alpha1.TidbClu time.Sleep(5 * time.Second) } - err := controller.GetPDClient(deps.PDControl, tc).EndEvictLeader(storeID) + err := pdapi.EndEvictLeader(context.TODO(), controller.GetPDClient(deps.PDControl, tc), storeID) if err != nil { klog.Errorf("endEvictLeaderbyStoreID: failed to end evict leader for store: %d of %s/%s, error: %v", storeID, tc.Namespace, tc.Name, err) return err diff --git a/pkg/manager/member/tikv_upgrader_test.go b/pkg/manager/member/tikv_upgrader_test.go index 9dc8c34af4e..b6452ac01df 100644 --- a/pkg/manager/member/tikv_upgrader_test.go +++ b/pkg/manager/member/tikv_upgrader_test.go @@ -27,6 +27,7 @@ import ( "github.com/pingcap/tidb-operator/pkg/manager/volumes" "github.com/pingcap/tidb-operator/pkg/pdapi" "github.com/pingcap/tidb-operator/pkg/tikvapi" + pd "github.com/tikv/pd/client/http" . 
"github.com/onsi/gomega" apps "k8s.io/api/apps/v1" @@ -118,10 +119,10 @@ func TestTiKVUpgraderUpgrade(t *testing.T) { tikvState = v1alpha1.TiKVStateUp } pdClient.AddReaction(pdapi.GetStoresActionType, func(action *pdapi.Action) (interface{}, error) { - storesInfo := &pdapi.StoresInfo{ - Stores: []*pdapi.StoreInfo{ + storesInfo := &pd.StoresInfo{ + Stores: []pd.StoreInfo{ { - Store: &pdapi.MetaStore{ + Store: pd.MetaStore{ StateName: tikvState, }, }, diff --git a/pkg/pdapi/fake_pdapi.go b/pkg/pdapi/fake_pdapi.go index f05faa106d3..f9c55dcd5eb 100644 --- a/pkg/pdapi/fake_pdapi.go +++ b/pkg/pdapi/fake_pdapi.go @@ -14,36 +14,39 @@ package pdapi import ( + "context" + "errors" "fmt" + "net/http" "github.com/pingcap/kvproto/pkg/metapb" "github.com/pingcap/kvproto/pkg/pdpb" + pd "github.com/tikv/pd/client/http" ) type ActionType string const ( - GetHealthActionType ActionType = "GetHealth" - GetConfigActionType ActionType = "GetConfig" - GetClusterActionType ActionType = "GetCluster" - GetMembersActionType ActionType = "GetMembers" - GetStoresActionType ActionType = "GetStores" - GetTombStoneStoresActionType ActionType = "GetTombStoneStores" - GetStoreActionType ActionType = "GetStore" - DeleteStoreActionType ActionType = "DeleteStore" - SetStoreStateActionType ActionType = "SetStoreState" - DeleteMemberByIDActionType ActionType = "DeleteMemberByID" - DeleteMemberActionType ActionType = "DeleteMember " - SetStoreLabelsActionType ActionType = "SetStoreLabels" - UpdateReplicationActionType ActionType = "UpdateReplicationConfig" - BeginEvictLeaderActionType ActionType = "BeginEvictLeader" - EndEvictLeaderActionType ActionType = "EndEvictLeader" - GetEvictLeaderSchedulersActionType ActionType = "GetEvictLeaderSchedulers" - GetEvictLeaderSchedulersForStoresActionType ActionType = "GetEvictLeaderSchedulersForStores" - GetPDLeaderActionType ActionType = "GetPDLeader" - TransferPDLeaderActionType ActionType = "TransferPDLeader" - GetAutoscalingPlansActionType ActionType = "GetAutoscalingPlans" - GetRecoveringMarkActionType ActionType = "GetRecoveringMark" + GetHealthActionType ActionType = "GetHealth" + GetConfigActionType ActionType = "GetConfig" + GetClusterActionType ActionType = "GetCluster" + GetMembersActionType ActionType = "GetMembers" + GetStoresActionType ActionType = "GetStores" + GetTombStoneStoresActionType ActionType = "GetTombStoneStores" + GetStoreActionType ActionType = "GetStore" + DeleteStoreActionType ActionType = "DeleteStore" + SetStoreStateActionType ActionType = "SetStoreState" + DeleteMemberByIDActionType ActionType = "DeleteMemberByID" + DeleteMemberActionType ActionType = "DeleteMember " + SetStoreLabelsActionType ActionType = "SetStoreLabels" + UpdateReplicationActionType ActionType = "UpdateReplicationConfig" + BeginEvictLeaderActionType ActionType = "BeginEvictLeader" + EndEvictLeaderActionType ActionType = "EndEvictLeader" + GetEvictLeaderSchedulersActionType ActionType = "GetEvictLeaderSchedulers" + GetPDLeaderActionType ActionType = "GetPDLeader" + TransferPDLeaderActionType ActionType = "TransferPDLeader" + GetAutoscalingPlansActionType ActionType = "GetAutoscalingPlans" + GetRecoveringMarkActionType ActionType = "GetRecoveringMark" ) type NotFoundReaction struct { @@ -58,14 +61,15 @@ type Action struct { ID uint64 Name string Labels map[string]string - Replication PDReplicationConfig + Replication pd.ReplicationConfig } type Reaction func(action *Action) (interface{}, error) // FakePDClient implements a fake version of PDClient. 
type FakePDClient struct { - reactions map[ActionType]Reaction + reactions map[ActionType]Reaction + schedulers []string } func NewFakePDClient() *FakePDClient { @@ -88,90 +92,311 @@ func (c *FakePDClient) fakeAPI(actionType ActionType, action *Action) (interface return nil, &NotFoundReaction{actionType} } -func (c *FakePDClient) GetHealth() (*HealthInfo, error) { +func (c *FakePDClient) WithCallerID(s string) pd.Client { + //TODO implement me + panic("implement me") +} + +func (c *FakePDClient) WithRespHandler(f func(resp *http.Response, res interface{}) error) pd.Client { + //TODO implement me + panic("implement me") +} + +func (c *FakePDClient) Close() { + //TODO implement me + panic("implement me") +} + +func (c *FakePDClient) GetStoresByState(ctx context.Context, state metapb.StoreState) (*pd.StoresInfo, error) { + //TODO implement me + panic("implement me") +} + +func (c *FakePDClient) GetScheduleConfig(ctx context.Context) (*pd.ScheduleConfig, error) { + //TODO implement me + panic("implement me") +} + +func (c *FakePDClient) SetScheduleConfig(ctx context.Context, config *pd.ScheduleConfig) error { + //TODO implement me + panic("implement me") +} + +func (c *FakePDClient) GetRecoveringMark(ctx context.Context) (bool, error) { action := &Action{} - result, err := c.fakeAPI(GetHealthActionType, action) + _, err := c.fakeAPI(GetRecoveringMarkActionType, action) if err != nil { - return nil, err + return false, err } - return result.(*HealthInfo), nil + + return true, nil } -func (c *FakePDClient) GetConfig() (*PDConfigFromAPI, error) { +func (c *FakePDClient) GetConfig(ctx context.Context) (*pd.ServerConfig, error) { action := &Action{} result, err := c.fakeAPI(GetConfigActionType, action) if err != nil { return nil, err } - return result.(*PDConfigFromAPI), nil + return result.(*pd.ServerConfig), nil } -func (c *FakePDClient) GetCluster() (*metapb.Cluster, error) { - action := &Action{} - result, err := c.fakeAPI(GetClusterActionType, action) +func (c *FakePDClient) GetStore(ctx context.Context, id uint64) (*pd.StoreInfo, error) { + action := &Action{ + ID: id, + } + result, err := c.fakeAPI(GetStoreActionType, action) if err != nil { return nil, err } - return result.(*metapb.Cluster), nil + return result.(*pd.StoreInfo), nil +} + +func (c *FakePDClient) SetStoreLabels(ctx context.Context, storeID uint64, labels map[string]string) (bool, error) { + if reaction, ok := c.reactions[SetStoreLabelsActionType]; ok { + action := &Action{ID: storeID, Labels: labels} + result, err := reaction(action) + return result.(bool), err + } + return true, nil +} + +func (c *FakePDClient) DeleteStore(ctx context.Context, id uint64) error { + if reaction, ok := c.reactions[DeleteStoreActionType]; ok { + action := &Action{ID: id} + _, err := reaction(action) + return err + } + return nil +} + +func (c *FakePDClient) SetStoreState(ctx context.Context, id uint64, s string) error { + if reaction, ok := c.reactions[SetStoreStateActionType]; ok { + action := &Action{ID: id} + _, err := reaction(action) + return err + } + return nil } -func (c *FakePDClient) GetMembers() (*MembersInfo, error) { +func (c *FakePDClient) GetEvictLeaderSchedulerConfig(ctx context.Context) (*pd.EvictLeaderSchedulerConfig, error) { + //TODO implement me + panic("implement me") +} + +func (c *FakePDClient) GetRegionByID(ctx context.Context, u uint64) (*pd.RegionInfo, error) { + //TODO implement me + panic("implement me") +} + +func (c *FakePDClient) GetRegionByKey(ctx context.Context, bytes []byte) (*pd.RegionInfo, error) { + //TODO 
implement me + panic("implement me") +} + +func (c *FakePDClient) GetRegions(ctx context.Context) (*pd.RegionsInfo, error) { + //TODO implement me + panic("implement me") +} + +func (c *FakePDClient) GetRegionsByKeyRange(ctx context.Context, keyRange *pd.KeyRange, i int) (*pd.RegionsInfo, error) { + //TODO implement me + panic("implement me") +} + +func (c *FakePDClient) GetRegionsByStoreID(ctx context.Context, u uint64) (*pd.RegionsInfo, error) { + //TODO implement me + panic("implement me") +} + +func (c *FakePDClient) GetRegionsReplicatedStateByKeyRange(ctx context.Context, keyRange *pd.KeyRange) (string, error) { + //TODO implement me + panic("implement me") +} + +func (c *FakePDClient) GetHotReadRegions(ctx context.Context) (*pd.StoreHotPeersInfos, error) { + //TODO implement me + panic("implement me") +} + +func (c *FakePDClient) GetHotWriteRegions(ctx context.Context) (*pd.StoreHotPeersInfos, error) { + //TODO implement me + panic("implement me") +} + +func (c *FakePDClient) GetHistoryHotRegions(ctx context.Context, request *pd.HistoryHotRegionsRequest) (*pd.HistoryHotRegions, error) { + //TODO implement me + panic("implement me") +} + +func (c *FakePDClient) GetRegionStatusByKeyRange(ctx context.Context, keyRange *pd.KeyRange, b bool) (*pd.RegionStats, error) { + //TODO implement me + panic("implement me") +} + +func (c *FakePDClient) GetStores(ctx context.Context) (*pd.StoresInfo, error) { action := &Action{} - result, err := c.fakeAPI(GetMembersActionType, action) + result, err := c.fakeAPI(GetStoresActionType, action) if err != nil { return nil, err } - return result.(*MembersInfo), nil + return result.(*pd.StoresInfo), nil } -func (c *FakePDClient) GetStores() (*StoresInfo, error) { +func (c *FakePDClient) GetAllPlacementRuleBundles(ctx context.Context) ([]*pd.GroupBundle, error) { + //TODO implement me + panic("implement me") +} + +func (c *FakePDClient) GetPlacementRuleBundleByGroup(ctx context.Context, s string) (*pd.GroupBundle, error) { + //TODO implement me + panic("implement me") +} + +func (c *FakePDClient) GetPlacementRulesByGroup(ctx context.Context, s string) ([]*pd.Rule, error) { + //TODO implement me + panic("implement me") +} + +func (c *FakePDClient) SetPlacementRule(ctx context.Context, rule *pd.Rule) error { + //TODO implement me + panic("implement me") +} + +func (c *FakePDClient) SetPlacementRuleInBatch(ctx context.Context, ops []*pd.RuleOp) error { + //TODO implement me + panic("implement me") +} + +func (c *FakePDClient) SetPlacementRuleBundles(ctx context.Context, bundles []*pd.GroupBundle, b bool) error { + //TODO implement me + panic("implement me") +} + +func (c *FakePDClient) DeletePlacementRule(ctx context.Context, s string, s2 string) error { + //TODO implement me + panic("implement me") +} + +func (c *FakePDClient) GetAllPlacementRuleGroups(ctx context.Context) ([]*pd.RuleGroup, error) { + //TODO implement me + panic("implement me") +} + +func (c *FakePDClient) GetPlacementRuleGroupByID(ctx context.Context, s string) (*pd.RuleGroup, error) { + //TODO implement me + panic("implement me") +} + +func (c *FakePDClient) SetPlacementRuleGroup(ctx context.Context, group *pd.RuleGroup) error { + //TODO implement me + panic("implement me") +} + +func (c *FakePDClient) DeletePlacementRuleGroupByID(ctx context.Context, s string) error { + //TODO implement me + panic("implement me") +} + +func (c *FakePDClient) GetAllRegionLabelRules(ctx context.Context) ([]*pd.LabelRule, error) { + //TODO implement me + panic("implement me") +} + +func (c *FakePDClient) 
GetRegionLabelRulesByIDs(ctx context.Context, strings []string) ([]*pd.LabelRule, error) { + //TODO implement me + panic("implement me") +} + +func (c *FakePDClient) SetRegionLabelRule(ctx context.Context, rule *pd.LabelRule) error { + //TODO implement me + panic("implement me") +} + +func (c *FakePDClient) PatchRegionLabelRules(ctx context.Context, patch *pd.LabelRulePatch) error { + //TODO implement me + panic("implement me") +} + +func (c *FakePDClient) AccelerateSchedule(ctx context.Context, keyRange *pd.KeyRange) error { + //TODO implement me + panic("implement me") +} + +func (c *FakePDClient) AccelerateScheduleInBatch(ctx context.Context, ranges []*pd.KeyRange) error { + //TODO implement me + panic("implement me") +} + +func (c *FakePDClient) GetMinResolvedTSByStoresIDs(ctx context.Context, uint64s []uint64) (uint64, map[uint64]uint64, error) { + //TODO implement me + panic("implement me") +} + +func (c *FakePDClient) GetHealth(ctx context.Context) (*pd.HealthInfo, error) { action := &Action{} - result, err := c.fakeAPI(GetStoresActionType, action) + result, err := c.fakeAPI(GetHealthActionType, action) if err != nil { return nil, err } - return result.(*StoresInfo), nil + return result.(*pd.HealthInfo), nil } -func (c *FakePDClient) GetTombStoneStores() (*StoresInfo, error) { +func (c *FakePDClient) GetMembers(ctx context.Context) (*pd.MembersInfo, error) { action := &Action{} - result, err := c.fakeAPI(GetTombStoneStoresActionType, action) + result, err := c.fakeAPI(GetMembersActionType, action) if err != nil { return nil, err } - return result.(*StoresInfo), nil + return result.(*pd.MembersInfo), nil } -func (c *FakePDClient) GetStore(id uint64) (*StoreInfo, error) { - action := &Action{ - ID: id, - } - result, err := c.fakeAPI(GetStoreActionType, action) +func (c *FakePDClient) GetCluster(ctx context.Context) (*metapb.Cluster, error) { + action := &Action{} + result, err := c.fakeAPI(GetClusterActionType, action) if err != nil { return nil, err } - return result.(*StoreInfo), nil + return result.(*metapb.Cluster), nil } -func (c *FakePDClient) DeleteStore(id uint64) error { - if reaction, ok := c.reactions[DeleteStoreActionType]; ok { - action := &Action{ID: id} +func (c *FakePDClient) GetPDLeader(ctx context.Context) (*pdpb.Member, error) { + if reaction, ok := c.reactions[GetPDLeaderActionType]; ok { + action := &Action{} + result, err := reaction(action) + return result.(*pdpb.Member), err + } + return nil, nil +} + +func (c *FakePDClient) GetAutoscalingPlans(ctx context.Context, strategy pd.Strategy) ([]pd.Plan, error) { + if reaction, ok := c.reactions[GetAutoscalingPlansActionType]; ok { + action := &Action{} + result, err := reaction(action) + return result.([]pd.Plan), err + } + return nil, nil +} + +func (c *FakePDClient) UpdateReplicationConfig(ctx context.Context, config pd.ReplicationConfig) error { + if reaction, ok := c.reactions[UpdateReplicationActionType]; ok { + action := &Action{Replication: config} _, err := reaction(action) return err } return nil } -func (c *FakePDClient) SetStoreState(id uint64, state string) error { - if reaction, ok := c.reactions[SetStoreStateActionType]; ok { - action := &Action{ID: id} +func (c *FakePDClient) TransferPDLeader(ctx context.Context, memberName string) error { + if reaction, ok := c.reactions[TransferPDLeaderActionType]; ok { + action := &Action{Name: memberName} _, err := reaction(action) return err } return nil } -func (c *FakePDClient) DeleteMemberByID(id uint64) error { +func (c *FakePDClient) DeleteMemberByID(ctx 
context.Context, id uint64) error { if reaction, ok := c.reactions[DeleteMemberByIDActionType]; ok { action := &Action{ID: id} _, err := reaction(action) @@ -180,7 +405,7 @@ func (c *FakePDClient) DeleteMemberByID(id uint64) error { return nil } -func (c *FakePDClient) DeleteMember(name string) error { +func (c *FakePDClient) DeleteMember(ctx context.Context, name string) error { if reaction, ok := c.reactions[DeleteMemberActionType]; ok { action := &Action{Name: name} _, err := reaction(action) @@ -189,42 +414,30 @@ func (c *FakePDClient) DeleteMember(name string) error { return nil } -// SetStoreLabels sets TiKV labels -func (c *FakePDClient) SetStoreLabels(storeID uint64, labels map[string]string) (bool, error) { - if reaction, ok := c.reactions[SetStoreLabelsActionType]; ok { - action := &Action{ID: storeID, Labels: labels} - result, err := reaction(action) - return result.(bool), err - } - return true, nil -} - -// UpdateReplicationConfig updates the replication config -func (c *FakePDClient) UpdateReplicationConfig(config PDReplicationConfig) error { - if reaction, ok := c.reactions[UpdateReplicationActionType]; ok { - action := &Action{Replication: config} - _, err := reaction(action) - return err +func (c *FakePDClient) CreateScheduler(ctx context.Context, name string, storeID uint64) error { + name = fmt.Sprintf("%s-%v", name, storeID) + for _, scheduler := range c.schedulers { + if scheduler == name { + return nil + } } + c.schedulers = append(c.schedulers, name) return nil } -func (c *FakePDClient) BeginEvictLeader(storeID uint64) error { - if reaction, ok := c.reactions[BeginEvictLeaderActionType]; ok { - action := &Action{ID: storeID} - _, err := reaction(action) - return err - } - return nil +func (c *FakePDClient) GetSchedulers(ctx context.Context) ([]string, error) { + return c.schedulers, nil } -func (c *FakePDClient) EndEvictLeader(storeID uint64) error { - if reaction, ok := c.reactions[EndEvictLeaderActionType]; ok { - action := &Action{ID: storeID} - _, err := reaction(action) - return err +func (c *FakePDClient) DeleteScheduler(ctx context.Context, name string, storeID uint64) error { + name = fmt.Sprintf("%s-%v", name, storeID) + for i := 0; i < len(c.schedulers); i++ { + if c.schedulers[i] == name { + c.schedulers = append(c.schedulers[:i], c.schedulers[i+1:]...) 
+ return nil + } } - return nil + return errors.New("scheduler not found") } func (c *FakePDClient) GetEvictLeaderSchedulers() ([]string, error) { @@ -244,40 +457,3 @@ func (c *FakePDClient) GetEvictLeaderSchedulersForStores(storeIDs ...uint64) (ma } return nil, nil } - -func (c *FakePDClient) GetPDLeader() (*pdpb.Member, error) { - if reaction, ok := c.reactions[GetPDLeaderActionType]; ok { - action := &Action{} - result, err := reaction(action) - return result.(*pdpb.Member), err - } - return nil, nil -} - -func (c *FakePDClient) TransferPDLeader(memberName string) error { - if reaction, ok := c.reactions[TransferPDLeaderActionType]; ok { - action := &Action{Name: memberName} - _, err := reaction(action) - return err - } - return nil -} - -func (c *FakePDClient) GetAutoscalingPlans(strategy Strategy) ([]Plan, error) { - if reaction, ok := c.reactions[GetAutoscalingPlansActionType]; ok { - action := &Action{} - result, err := reaction(action) - return result.([]Plan), err - } - return nil, nil -} - -func (c *FakePDClient) GetRecoveringMark() (bool, error) { - action := &Action{} - _, err := c.fakeAPI(GetRecoveringMarkActionType, action) - if err != nil { - return false, err - } - - return true, nil -} diff --git a/pkg/pdapi/pd_config.go b/pkg/pdapi/pd_config.go deleted file mode 100644 index 03549666817..00000000000 --- a/pkg/pdapi/pd_config.go +++ /dev/null @@ -1,273 +0,0 @@ -// Copyright 2020 PingCAP, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// See the License for the specific language governing permissions and -// limitations under the License. - -package pdapi - -import ( - "strconv" - "strings" -) - -// PDConfigFromAPI is the configuration from PD API -// +k8s:openapi-gen=true -type PDConfigFromAPI struct { - - // Log related config. - Log *PDLogConfig `toml:"log,omitempty" json:"log,omitempty"` - - // Immutable, change should be made through pd-ctl after cluster creation - Schedule *PDScheduleConfig `toml:"schedule,omitempty" json:"schedule,omitempty"` - - // Immutable, change should be made through pd-ctl after cluster creation - Replication *PDReplicationConfig `toml:"replication,omitempty" json:"replication,omitempty"` -} - -// PDLogConfig serializes log related config in toml/json. -// +k8s:openapi-gen=true -type PDLogConfig struct { - // Log level. - // Optional: Defaults to info - Level string `toml:"level,omitempty" json:"level,omitempty"` - // Log format. one of json, text, or console. - Format string `toml:"format,omitempty" json:"format,omitempty"` - // Disable automatic timestamps in output. - DisableTimestamp *bool `toml:"disable-timestamp,omitempty" json:"disable-timestamp,omitempty"` - // File log config. - File *FileLogConfig `toml:"file,omitempty" json:"file,omitempty"` - // Development puts the logger in development mode, which changes the - // behavior of DPanicLevel and takes stacktraces more liberally. - Development *bool `toml:"development,omitempty" json:"development,omitempty"` - // DisableCaller stops annotating logs with the calling function's file - // name and line number. By default, all logs are annotated. 
- DisableCaller *bool `toml:"disable-caller,omitempty" json:"disable-caller,omitempty"` - // DisableStacktrace completely disables automatic stacktrace capturing. By - // default, stacktraces are captured for WarnLevel and above logs in - // development and ErrorLevel and above in production. - DisableStacktrace *bool `toml:"disable-stacktrace,omitempty" json:"disable-stacktrace,omitempty"` - // DisableErrorVerbose stops annotating logs with the full verbose error - // message. - DisableErrorVerbose *bool `toml:"disable-error-verbose,omitempty" json:"disable-error-verbose,omitempty"` -} - -// PDReplicationConfig is the replication configuration. -// +k8s:openapi-gen=true -type PDReplicationConfig struct { - // MaxReplicas is the number of replicas for each region. - // Immutable, change should be made through pd-ctl after cluster creation - // Optional: Defaults to 3 - MaxReplicas *uint64 `toml:"max-replicas,omitempty" json:"max-replicas,omitempty"` - - // The label keys specified the location of a store. - // The placement priorities is implied by the order of label keys. - // For example, ["zone", "rack"] means that we should place replicas to - // different zones first, then to different racks if we don't have enough zones. - // Immutable, change should be made through pd-ctl after cluster creation - // +k8s:openapi-gen=false - LocationLabels StringSlice `toml:"location-labels,omitempty" json:"location-labels,omitempty"` - // StrictlyMatchLabel strictly checks if the label of TiKV is matched with LocaltionLabels. - // Immutable, change should be made through pd-ctl after cluster creation. - // Imported from v3.1.0 - StrictlyMatchLabel *bool `toml:"strictly-match-label,omitempty" json:"strictly-match-label,string,omitempty"` - - // When PlacementRules feature is enabled. MaxReplicas and LocationLabels are not used anymore. - EnablePlacementRules *bool `toml:"enable-placement-rules" json:"enable-placement-rules,string,omitempty"` -} - -// ScheduleConfig is the schedule configuration. -// +k8s:openapi-gen=true -type PDScheduleConfig struct { - // If the snapshot count of one store is greater than this value, - // it will never be used as a source or target store. - // Immutable, change should be made through pd-ctl after cluster creation - // Optional: Defaults to 3 - MaxSnapshotCount *uint64 `toml:"max-snapshot-count,omitempty" json:"max-snapshot-count,omitempty"` - // Immutable, change should be made through pd-ctl after cluster creation - // Optional: Defaults to 16 - MaxPendingPeerCount *uint64 `toml:"max-pending-peer-count,omitempty" json:"max-pending-peer-count,omitempty"` - // If both the size of region is smaller than MaxMergeRegionSize - // and the number of rows in region is smaller than MaxMergeRegionKeys, - // it will try to merge with adjacent regions. - // Immutable, change should be made through pd-ctl after cluster creation - // Optional: Defaults to 20 - MaxMergeRegionSize *uint64 `toml:"max-merge-region-size,omitempty" json:"max-merge-region-size,omitempty"` - // Immutable, change should be made through pd-ctl after cluster creation - // Optional: Defaults to 200000 - MaxMergeRegionKeys *uint64 `toml:"max-merge-region-keys,omitempty" json:"max-merge-region-keys,omitempty"` - // SplitMergeInterval is the minimum interval time to permit merge after split. 
- // Immutable, change should be made through pd-ctl after cluster creation - // Optional: Defaults to 1h - SplitMergeInterval string `toml:"split-merge-interval,omitempty" json:"split-merge-interval,omitempty"` - // PatrolRegionInterval is the interval for scanning region during patrol. - // Immutable, change should be made through pd-ctl after cluster creation - PatrolRegionInterval string `toml:"patrol-region-interval,omitempty" json:"patrol-region-interval,omitempty"` - // MaxStoreDownTime is the max duration after which - // a store will be considered to be down if it hasn't reported heartbeats. - // Immutable, change should be made through pd-ctl after cluster creation - // Optional: Defaults to 30m - MaxStoreDownTime string `toml:"max-store-down-time,omitempty" json:"max-store-down-time,omitempty"` - // LeaderScheduleLimit is the max coexist leader schedules. - // Immutable, change should be made through pd-ctl after cluster creation. - // Optional: Defaults to 4. - // Imported from v3.1.0 - LeaderScheduleLimit *uint64 `toml:"leader-schedule-limit,omitempty" json:"leader-schedule-limit,omitempty"` - // RegionScheduleLimit is the max coexist region schedules. - // Immutable, change should be made through pd-ctl after cluster creation - // Optional: Defaults to 2048 - RegionScheduleLimit *uint64 `toml:"region-schedule-limit,omitempty" json:"region-schedule-limit,omitempty"` - // ReplicaScheduleLimit is the max coexist replica schedules. - // Immutable, change should be made through pd-ctl after cluster creation - // Optional: Defaults to 64 - ReplicaScheduleLimit *uint64 `toml:"replica-schedule-limit,omitempty" json:"replica-schedule-limit,omitempty"` - // MergeScheduleLimit is the max coexist merge schedules. - // Immutable, change should be made through pd-ctl after cluster creation - // Optional: Defaults to 8 - MergeScheduleLimit *uint64 `toml:"merge-schedule-limit,omitempty" json:"merge-schedule-limit,omitempty"` - // HotRegionScheduleLimit is the max coexist hot region schedules. - // Immutable, change should be made through pd-ctl after cluster creation - // Optional: Defaults to 4 - HotRegionScheduleLimit *uint64 `toml:"hot-region-schedule-limit,omitempty" json:"hot-region-schedule-limit,omitempty"` - // HotRegionCacheHitThreshold is the cache hits threshold of the hot region. - // If the number of times a region hits the hot cache is greater than this - // threshold, it is considered a hot region. - // Immutable, change should be made through pd-ctl after cluster creation - HotRegionCacheHitsThreshold *uint64 `toml:"hot-region-cache-hits-threshold,omitempty" json:"hot-region-cache-hits-threshold,omitempty"` - // TolerantSizeRatio is the ratio of buffer size for balance scheduler. - // Immutable, change should be made through pd-ctl after cluster creation. - // Imported from v3.1.0 - TolerantSizeRatio *float64 `toml:"tolerant-size-ratio,omitempty" json:"tolerant-size-ratio,omitempty"` - // - // high space stage transition stage low space stage - // |--------------------|-----------------------------|-------------------------| - // ^ ^ ^ ^ - // 0 HighSpaceRatio * capacity LowSpaceRatio * capacity capacity - // - // LowSpaceRatio is the lowest usage ratio of store which regraded as low space. - // When in low space, store region score increases to very large and varies inversely with available size. 
- // Immutable, change should be made through pd-ctl after cluster creation - LowSpaceRatio *float64 `toml:"low-space-ratio,omitempty" json:"low-space-ratio,omitempty"` - // HighSpaceRatio is the highest usage ratio of store which regraded as high space. - // High space means there is a lot of spare capacity, and store region score varies directly with used size. - // Immutable, change should be made through pd-ctl after cluster creation - HighSpaceRatio *float64 `toml:"high-space-ratio,omitempty" json:"high-space-ratio,omitempty"` - // DisableLearner is the option to disable using AddLearnerNode instead of AddNode - // Immutable, change should be made through pd-ctl after cluster creation - DisableLearner *bool `toml:"disable-raft-learner,omitempty" json:"disable-raft-learner,string,omitempty"` - - // DisableRemoveDownReplica is the option to prevent replica checker from - // removing down replicas. - // Immutable, change should be made through pd-ctl after cluster creation - DisableRemoveDownReplica *bool `toml:"disable-remove-down-replica,omitempty" json:"disable-remove-down-replica,string,omitempty"` - // DisableReplaceOfflineReplica is the option to prevent replica checker from - // repalcing offline replicas. - // Immutable, change should be made through pd-ctl after cluster creation - DisableReplaceOfflineReplica *bool `toml:"disable-replace-offline-replica,omitempty" json:"disable-replace-offline-replica,string,omitempty"` - // DisableMakeUpReplica is the option to prevent replica checker from making up - // replicas when replica count is less than expected. - // Immutable, change should be made through pd-ctl after cluster creation - DisableMakeUpReplica *bool `toml:"disable-make-up-replica,omitempty" json:"disable-make-up-replica,string,omitempty"` - // DisableRemoveExtraReplica is the option to prevent replica checker from - // removing extra replicas. - // Immutable, change should be made through pd-ctl after cluster creation - DisableRemoveExtraReplica *bool `toml:"disable-remove-extra-replica,omitempty" json:"disable-remove-extra-replica,string,omitempty"` - // DisableLocationReplacement is the option to prevent replica checker from - // moving replica to a better location. - // Immutable, change should be made through pd-ctl after cluster creation - DisableLocationReplacement *bool `toml:"disable-location-replacement,omitempty" json:"disable-location-replacement,string,omitempty"` - // DisableNamespaceRelocation is the option to prevent namespace checker - // from moving replica to the target namespace. - // Immutable, change should be made through pd-ctl after cluster creation - DisableNamespaceRelocation *bool `toml:"disable-namespace-relocation,omitempty" json:"disable-namespace-relocation,string,omitempty"` - - // Schedulers support for loding customized schedulers - // Immutable, change should be made through pd-ctl after cluster creation - Schedulers *PDSchedulerConfigs `toml:"schedulers,omitempty" json:"schedulers-v2,omitempty"` // json v2 is for the sake of compatible upgrade - - // Only used to display - SchedulersPayload map[string]interface{} `toml:"schedulers-payload" json:"schedulers-payload,omitempty"` - - // EnableOneWayMerge is the option to enable one way merge. This means a Region can only be merged into the next region of it. - // Imported from v3.1.0 - EnableOneWayMerge *bool `toml:"enable-one-way-merge" json:"enable-one-way-merge,string,omitempty"` - // EnableCrossTableMerge is the option to enable cross table merge. 
This means two Regions can be merged with different table IDs. - // This option only works when key type is "table". - // Imported from v3.1.0 - EnableCrossTableMerge *bool `toml:"enable-cross-table-merge" json:"enable-cross-table-merge,string,omitempty"` -} - -type PDSchedulerConfigs []PDSchedulerConfig - -// PDSchedulerConfig is customized scheduler configuration -// +k8s:openapi-gen=true -type PDSchedulerConfig struct { - // Immutable, change should be made through pd-ctl after cluster creation - Type string `toml:"type,omitempty" json:"type,omitempty"` - // Immutable, change should be made through pd-ctl after cluster creation - Args []string `toml:"args,omitempty" json:"args,omitempty"` - // Immutable, change should be made through pd-ctl after cluster creation - Disable *bool `toml:"disable,omitempty" json:"disable,omitempty"` -} - -// PDStoreLabel is the config item of LabelPropertyConfig. -// +k8s:openapi-gen=true -type PDStoreLabel struct { - Key string `toml:"key,omitempty" json:"key,omitempty"` - Value string `toml:"value,omitempty" json:"value,omitempty"` -} - -type PDStoreLabels []PDStoreLabel - -type PDLabelPropertyConfig map[string]PDStoreLabels - -// +k8s:openapi-gen=true -type FileLogConfig struct { - // Log filename, leave empty to disable file log. - Filename string `toml:"filename,omitempty" json:"filename,omitempty"` - // Is log rotate enabled. - LogRotate bool `toml:"log-rotate,omitempty" json:"log-rotate,omitempty"` - // Max size for a single file, in MB. - MaxSize int `toml:"max-size,omitempty" json:"max-size,omitempty"` - // Max log keep days, default is never deleting. - MaxDays int `toml:"max-days,omitempty" json:"max-days,omitempty"` - // Maximum number of old log files to retain. - MaxBackups int `toml:"max-backups,omitempty" json:"max-backups,omitempty"` -} - -// StringSlice is more friendly to json encode/decode -type StringSlice []string - -// MarshalJSON returns the size as a JSON string. -func (s StringSlice) MarshalJSON() ([]byte, error) { - return []byte(strconv.Quote(strings.Join(s, ","))), nil -} - -// UnmarshalJSON parses a JSON string into the bytesize. -func (s *StringSlice) UnmarshalJSON(text []byte) error { - data, err := strconv.Unquote(string(text)) - if err != nil { - return err - } - if len(data) == 0 { - *s = nil - return nil - } - *s = strings.Split(data, ",") - return nil -} - -// evictLeaderSchedulerConfig holds configuration for evict leader -// https://github.com/pingcap/pd/blob/b21855a3aeb787c71b0819743059e432be217dcd/server/schedulers/evict_leader.go#L81-L86 -// note that we use `interface{}` as the type of value because we don't care -// about the value for now -type evictLeaderSchedulerConfig struct { - StoreIDWithRanges map[uint64]interface{} `json:"store-id-ranges"` -} diff --git a/pkg/pdapi/pd_control.go b/pkg/pdapi/pd_control.go index cdd0276c0eb..b997c9a63e9 100644 --- a/pkg/pdapi/pd_control.go +++ b/pkg/pdapi/pd_control.go @@ -16,11 +16,11 @@ package pdapi import ( "crypto/tls" "fmt" - "net/http" "sync" "github.com/pingcap/tidb-operator/pkg/apis/pingcap/v1alpha1" "github.com/pingcap/tidb-operator/pkg/util" + pd "github.com/tikv/pd/client/http" "k8s.io/client-go/kubernetes" corelisterv1 "k8s.io/client-go/listers/core/v1" "k8s.io/klog/v2" @@ -73,7 +73,7 @@ func UseHeadlessService(headless bool) Option { // PDControlInterface is an interface that knows how to manage and get tidb cluster's PD client type PDControlInterface interface { // GetPDClient provides PDClient of the tidb cluster. 
- GetPDClient(namespace Namespace, tcName string, tlsEnabled bool, opts ...Option) PDClient + GetPDClient(namespace Namespace, tcName string, tlsEnabled bool, opts ...Option) pd.Client // GetPDEtcdClient provides PD etcd Client of the tidb cluster. GetPDEtcdClient(namespace Namespace, tcName string, tlsEnabled bool, opts ...Option) (PDEtcdClient, error) // GetEndpoints return the endpoints and client tls.Config to connection pd/etcd. @@ -141,7 +141,7 @@ type defaultPDControl struct { secretLister corelisterv1.SecretLister mutex sync.Mutex - pdClients map[string]PDClient + pdClients map[string]pd.Client etcdmutex sync.Mutex pdEtcdClients map[string]PDEtcdClient @@ -157,12 +157,12 @@ func (c *noOpClose) Close() error { // NewDefaultPDControl returns a defaultPDControl instance func NewDefaultPDControl(secretLister corelisterv1.SecretLister) PDControlInterface { - return &defaultPDControl{secretLister: secretLister, pdClients: map[string]PDClient{}, pdEtcdClients: map[string]PDEtcdClient{}} + return &defaultPDControl{secretLister: secretLister, pdClients: map[string]pd.Client{}, pdEtcdClients: map[string]PDEtcdClient{}} } -// NewDefaultPDControl returns a defaultPDControl instance +// NewDefaultPDControlByCli returns a defaultPDControl instance func NewDefaultPDControlByCli(kubeCli kubernetes.Interface) PDControlInterface { - return &defaultPDControl{pdClients: map[string]PDClient{}, pdEtcdClients: map[string]PDEtcdClient{}} + return &defaultPDControl{pdClients: map[string]pd.Client{}, pdEtcdClients: map[string]PDEtcdClient{}} } func (pdc *defaultPDControl) GetEndpoints(namespace Namespace, tcName string, tlsEnabled bool, opts ...Option) (endpoints []string, tlsConfig *tls.Config, err error) { @@ -216,7 +216,7 @@ func (pdc *defaultPDControl) GetPDEtcdClient(namespace Namespace, tcName string, } // GetPDClient provides a PDClient of real pd cluster, if the PDClient not existing, it will create new one. 
-func (pdc *defaultPDControl) GetPDClient(namespace Namespace, tcName string, tlsEnabled bool, opts ...Option) PDClient { +func (pdc *defaultPDControl) GetPDClient(namespace Namespace, tcName string, tlsEnabled bool, opts ...Option) pd.Client { config := &clientConfig{} config.tlsEnable = tlsEnabled @@ -231,13 +231,13 @@ func (pdc *defaultPDControl) GetPDClient(namespace Namespace, tcName string, tls tlsConfig, err := GetTLSConfig(pdc.secretLister, config.tlsSecretNamespace, config.tlsSecretName) if err != nil { klog.Errorf("Unable to get tls config for tidb cluster %q in %s, pd client may not work: %v", tcName, namespace, err) - return &pdClient{url: config.clientURL, httpClient: &http.Client{Timeout: DefaultTimeout}} + return pd.NewClient([]string{config.clientURL}) } - return NewPDClient(config.clientURL, DefaultTimeout, tlsConfig) + return pd.NewClient([]string{config.clientURL}, pd.WithTLSConfig(tlsConfig)) } if _, ok := pdc.pdClients[config.clientKey]; !ok { - pdc.pdClients[config.clientKey] = NewPDClient(config.clientURL, DefaultTimeout, nil) + pdc.pdClients[config.clientKey] = pd.NewClient([]string{config.clientURL}) } return pdc.pdClients[config.clientKey] } @@ -290,18 +290,18 @@ type FakePDControl struct { func NewFakePDControl(secretLister corelisterv1.SecretLister) *FakePDControl { return &FakePDControl{ - defaultPDControl{secretLister: secretLister, pdClients: map[string]PDClient{}}, + defaultPDControl{secretLister: secretLister, pdClients: map[string]pd.Client{}}, } } -func (fpc *FakePDControl) SetPDClient(namespace Namespace, tcName string, pdclient PDClient) { +func (fpc *FakePDControl) SetPDClient(namespace Namespace, tcName string, pdclient pd.Client) { fpc.defaultPDControl.pdClients[genClientKey("http", namespace, tcName, "")] = pdclient } -func (fpc *FakePDControl) SetPDClientWithClusterDomain(namespace Namespace, tcName string, tcClusterDomain string, pdclient PDClient) { +func (fpc *FakePDControl) SetPDClientWithClusterDomain(namespace Namespace, tcName string, tcClusterDomain string, pdclient pd.Client) { fpc.defaultPDControl.pdClients[genClientKey("http", namespace, tcName, tcClusterDomain)] = pdclient } -func (fpc *FakePDControl) SetPDClientWithAddress(peerURL string, pdclient PDClient) { +func (fpc *FakePDControl) SetPDClientWithAddress(peerURL string, pdclient pd.Client) { fpc.defaultPDControl.pdClients[peerURL] = pdclient } diff --git a/pkg/pdapi/pdapi.go b/pkg/pdapi/pdapi.go index 899bc7e39c4..ece802b8a70 100644 --- a/pkg/pdapi/pdapi.go +++ b/pkg/pdapi/pdapi.go @@ -14,28 +14,17 @@ package pdapi import ( - "bytes" "crypto/tls" - "encoding/json" "fmt" - "io" - "net/http" - "strings" "time" - "github.com/pingcap/kvproto/pkg/metapb" - "github.com/pingcap/kvproto/pkg/pdpb" "github.com/pingcap/tidb-operator/pkg/util/crypto" - httputil "github.com/pingcap/tidb-operator/pkg/util/http" - "github.com/tikv/pd/pkg/typeutil" corelisterv1 "k8s.io/client-go/listers/core/v1" - "k8s.io/klog/v2" ) const ( DefaultTimeout = 5 * time.Second evictSchedulerLeader = "evict-leader-scheduler" - tiKVNotBootstrapped = `TiKV cluster not bootstrapped, please start TiKV first"` ) // GetTLSConfig returns *tls.Config for given TiDB cluster. 
@@ -48,713 +37,6 @@ func GetTLSConfig(secretLister corelisterv1.SecretLister, namespace Namespace, s return crypto.LoadTlsConfigFromSecret(secret) } -// PDClient provides pd server's api -type PDClient interface { - // GetHealth returns the PD's health info - GetHealth() (*HealthInfo, error) - // GetConfig returns PD's config - GetConfig() (*PDConfigFromAPI, error) - // GetCluster returns used when syncing pod labels. - GetCluster() (*metapb.Cluster, error) - // GetMembers returns all PD members from cluster - GetMembers() (*MembersInfo, error) - // GetStores lists all TiKV stores from cluster - GetStores() (*StoresInfo, error) - // GetTombStoneStores lists all tombstone stores from cluster - GetTombStoneStores() (*StoresInfo, error) - // GetStore gets a TiKV store for a specific store id from cluster - GetStore(storeID uint64) (*StoreInfo, error) - // SetStoreLabels compares store labels with node labels - // for historic reasons, PD stores TiKV labels as []*StoreLabel which is a key-value pair slice - SetStoreLabels(storeID uint64, labels map[string]string) (bool, error) - // UpdateReplicationConfig updates the replication config - UpdateReplicationConfig(config PDReplicationConfig) error - // DeleteStore deletes a TiKV store from cluster - DeleteStore(storeID uint64) error - // SetStoreState sets store to specified state. - SetStoreState(storeID uint64, state string) error - // DeleteMember deletes a PD member from cluster - DeleteMember(name string) error - // DeleteMemberByID deletes a PD member from cluster - DeleteMemberByID(memberID uint64) error - // BeginEvictLeader initiates leader eviction for a storeID. - // This is used when upgrading a pod. - BeginEvictLeader(storeID uint64) error - // EndEvictLeader is used at the end of pod upgrade. - EndEvictLeader(storeID uint64) error - // GetEvictLeaderSchedulers gets schedulers of evict leader - GetEvictLeaderSchedulers() ([]string, error) - // GetEvictLeaderSchedulersForStores gets schedulers of evict leader for given stores - GetEvictLeaderSchedulersForStores(storeIDs ...uint64) (map[uint64]string, error) - // GetPDLeader returns pd leader - GetPDLeader() (*pdpb.Member, error) - // TransferPDLeader transfers pd leader to specified member - TransferPDLeader(name string) error - // GetAutoscalingPlans returns the scaling plan for the cluster - GetAutoscalingPlans(strategy Strategy) ([]Plan, error) - // GetRecoveringMark return the pd recovering mark - GetRecoveringMark() (bool, error) -} - -var ( - healthPrefix = "pd/api/v1/health" - membersPrefix = "pd/api/v1/members" - storesPrefix = "pd/api/v1/stores" - storePrefix = "pd/api/v1/store" - configPrefix = "pd/api/v1/config" - clusterIDPrefix = "pd/api/v1/cluster" - schedulersPrefix = "pd/api/v1/schedulers" - pdLeaderPrefix = "pd/api/v1/leader" - pdLeaderTransferPrefix = "pd/api/v1/leader/transfer" - pdReplicationPrefix = "pd/api/v1/config/replicate" - // evictLeaderSchedulerConfigPrefix is the prefix of evict-leader-scheduler - // config API, available since PD v3.1.0. 
- evictLeaderSchedulerConfigPrefix = "pd/api/v1/scheduler-config/evict-leader-scheduler/list" - autoscalingPrefix = "autoscaling" - recoveringMarkPrefix = "pd/api/v1/admin/cluster/markers/snapshot-recovering" -) - -// pdClient is default implementation of PDClient -type pdClient struct { - url string - httpClient *http.Client -} - -// NewPDClient returns a new PDClient -func NewPDClient(url string, timeout time.Duration, tlsConfig *tls.Config) PDClient { - var disableKeepalive bool - if tlsConfig != nil { - disableKeepalive = true - } - return &pdClient{ - url: url, - httpClient: &http.Client{ - Timeout: timeout, - Transport: &http.Transport{TLSClientConfig: tlsConfig, DisableKeepAlives: disableKeepalive}, - }, - } -} - -// following struct definitions are copied from github.com/pingcap/pd/server/api/store -// these are not exported by that package - -// HealthInfo define PD's healthy info -type HealthInfo struct { - Healths []MemberHealth -} - -// MemberHealth define a pd member's healthy info -type MemberHealth struct { - Name string `json:"name"` - MemberID uint64 `json:"member_id"` - ClientUrls []string `json:"client_urls"` - Health bool `json:"health"` -} - -// MetaStore is TiKV store status defined in protobuf -type MetaStore struct { - *metapb.Store - StateName string `json:"state_name"` -} - -// StoreStatus is TiKV store status returned from PD RESTful interface -type StoreStatus struct { - Capacity typeutil.ByteSize `json:"capacity"` - Available typeutil.ByteSize `json:"available"` - LeaderCount int `json:"leader_count"` - RegionCount int `json:"region_count"` - SendingSnapCount uint32 `json:"sending_snap_count"` - ReceivingSnapCount uint32 `json:"receiving_snap_count"` - ApplyingSnapCount uint32 `json:"applying_snap_count"` - IsBusy bool `json:"is_busy"` - - StartTS time.Time `json:"start_ts"` - LastHeartbeatTS time.Time `json:"last_heartbeat_ts"` - Uptime typeutil.Duration `json:"uptime"` -} - -// StoreInfo is a single store info returned from PD RESTful interface -type StoreInfo struct { - Store *MetaStore `json:"store"` - Status *StoreStatus `json:"status"` -} - -// StoresInfo is stores info returned from PD RESTful interface -type StoresInfo struct { - Count int `json:"count"` - Stores []*StoreInfo `json:"stores"` -} - -// MembersInfo is PD members info returned from PD RESTful interface -// type Members map[string][]*pdpb.Member -type MembersInfo struct { - Header *pdpb.ResponseHeader `json:"header,omitempty"` - Members []*pdpb.Member `json:"members,omitempty"` - Leader *pdpb.Member `json:"leader,omitempty"` - EtcdLeader *pdpb.Member `json:"etcd_leader,omitempty"` -} - -// below copied from github.com/tikv/pd/pkg/autoscaling - -// Strategy within an HTTP request provides rules and resources to help make decision for auto scaling. -type Strategy struct { - Rules []*Rule `json:"rules"` - Resources []*Resource `json:"resources"` -} - -// Rule is a set of constraints for a kind of component. -type Rule struct { - Component string `json:"component"` - CPURule *CPURule `json:"cpu_rule,omitempty"` - StorageRule *StorageRule `json:"storage_rule,omitempty"` -} - -// CPURule is the constraints about CPU. -type CPURule struct { - MaxThreshold float64 `json:"max_threshold"` - MinThreshold float64 `json:"min_threshold"` - ResourceTypes []string `json:"resource_types"` -} - -// StorageRule is the constraints about storage. 
-type StorageRule struct { - MinThreshold float64 `json:"min_threshold"` - ResourceTypes []string `json:"resource_types"` -} - -// Resource represents a kind of resource set including CPU, memory, storage. -type Resource struct { - ResourceType string `json:"resource_type"` - // The basic unit of CPU is milli-core. - CPU uint64 `json:"cpu"` - // The basic unit of memory is byte. - Memory uint64 `json:"memory"` - // The basic unit of storage is byte. - Storage uint64 `json:"storage"` - // If count is not set, it indicates no limit. - Count *uint64 `json:"count,omitempty"` -} - -// Plan is the final result of auto scaling, which indicates how to scale in or scale out. -type Plan struct { - Component string `json:"component"` - Count uint64 `json:"count"` - ResourceType string `json:"resource_type"` - Labels map[string]string `json:"labels"` -} - -type schedulerInfo struct { - Name string `json:"name"` - StoreID uint64 `json:"store_id"` -} - -type RecoveringMark struct { - Mark bool `json:"marked"` -} - -func (c *pdClient) GetHealth() (*HealthInfo, error) { - apiURL := fmt.Sprintf("%s/%s", c.url, healthPrefix) - body, err := httputil.GetBodyOK(c.httpClient, apiURL) - if err != nil { - return nil, err - } - var healths []MemberHealth - err = json.Unmarshal(body, &healths) - if err != nil { - return nil, err - } - return &HealthInfo{ - healths, - }, nil -} - -func (c *pdClient) GetConfig() (*PDConfigFromAPI, error) { - apiURL := fmt.Sprintf("%s/%s", c.url, configPrefix) - body, err := httputil.GetBodyOK(c.httpClient, apiURL) - if err != nil { - return nil, err - } - config := &PDConfigFromAPI{} - err = json.Unmarshal(body, config) - if err != nil { - return nil, err - } - return config, nil -} - -func (c *pdClient) GetCluster() (*metapb.Cluster, error) { - apiURL := fmt.Sprintf("%s/%s", c.url, clusterIDPrefix) - body, err := httputil.GetBodyOK(c.httpClient, apiURL) - if err != nil { - return nil, err - } - cluster := &metapb.Cluster{} - err = json.Unmarshal(body, cluster) - if err != nil { - return nil, err - } - return cluster, nil -} - -func (c *pdClient) GetMembers() (*MembersInfo, error) { - apiURL := fmt.Sprintf("%s/%s", c.url, membersPrefix) - body, err := httputil.GetBodyOK(c.httpClient, apiURL) - if err != nil { - return nil, err - } - members := &MembersInfo{} - err = json.Unmarshal(body, members) - if err != nil { - return nil, err - } - return members, nil -} - -func (c *pdClient) getStores(apiURL string) (*StoresInfo, error) { - body, err := httputil.GetBodyOK(c.httpClient, apiURL) - if err != nil { - return nil, err - } - storesInfo := &StoresInfo{} - err = json.Unmarshal(body, storesInfo) - if err != nil { - return nil, err - } - return storesInfo, nil -} - -func (c *pdClient) GetStores() (*StoresInfo, error) { - storesInfo, err := c.getStores(fmt.Sprintf("%s/%s", c.url, storesPrefix)) - if err != nil { - if strings.HasSuffix(err.Error(), tiKVNotBootstrapped+"\n") { - err = TiKVNotBootstrappedErrorf(err.Error()) - } - return nil, err - } - return storesInfo, nil -} - -func (c *pdClient) GetTombStoneStores() (*StoresInfo, error) { - return c.getStores(fmt.Sprintf("%s/%s?state=%d", c.url, storesPrefix, metapb.StoreState_Tombstone)) -} - -func (c *pdClient) GetStore(storeID uint64) (*StoreInfo, error) { - apiURL := fmt.Sprintf("%s/%s/%d", c.url, storePrefix, storeID) - body, err := httputil.GetBodyOK(c.httpClient, apiURL) - if err != nil { - return nil, err - } - storeInfo := &StoreInfo{} - err = json.Unmarshal(body, storeInfo) - if err != nil { - return nil, err - } - return 
storeInfo, nil -} - -func (c *pdClient) DeleteStore(storeID uint64) error { - var exist bool - stores, err := c.GetStores() - if err != nil { - return err - } - for _, store := range stores.Stores { - if store.Store.GetId() == storeID { - exist = true - break - } - } - if !exist { - return nil - } - apiURL := fmt.Sprintf("%s/%s/%d", c.url, storePrefix, storeID) - req, err := http.NewRequest("DELETE", apiURL, nil) - if err != nil { - return err - } - res, err := c.httpClient.Do(req) - if err != nil { - return err - } - defer httputil.DeferClose(res.Body) - - // Remove an offline store should return http.StatusOK - if res.StatusCode == http.StatusOK || res.StatusCode == http.StatusNotFound { - return nil - } - body, err := io.ReadAll(res.Body) - if err != nil { - return err - } - - return fmt.Errorf("failed to delete store %d: %v", storeID, string(body)) -} - -// SetStoreState sets store to specified state. -func (c *pdClient) SetStoreState(storeID uint64, state string) error { - apiURL := fmt.Sprintf("%s/%s/%d/state?state=%s", c.url, storePrefix, storeID, state) - req, err := http.NewRequest("POST", apiURL, nil) - if err != nil { - return err - } - res, err := c.httpClient.Do(req) - if err != nil { - return err - } - defer httputil.DeferClose(res.Body) - - if res.StatusCode == http.StatusOK || res.StatusCode == http.StatusNotFound { - return nil - } - body, err := io.ReadAll(res.Body) - if err != nil { - return err - } - - return fmt.Errorf("failed to delete store %d: %v", storeID, string(body)) -} - -func (c *pdClient) DeleteMemberByID(memberID uint64) error { - var exist bool - members, err := c.GetMembers() - if err != nil { - return err - } - for _, member := range members.Members { - if member.MemberId == memberID { - exist = true - break - } - } - if !exist { - return nil - } - apiURL := fmt.Sprintf("%s/%s/id/%d", c.url, membersPrefix, memberID) - req, err := http.NewRequest("DELETE", apiURL, nil) - if err != nil { - return err - } - res, err := c.httpClient.Do(req) - if err != nil { - return err - } - defer httputil.DeferClose(res.Body) - if res.StatusCode == http.StatusOK || res.StatusCode == http.StatusNotFound { - return nil - } - err2 := httputil.ReadErrorBody(res.Body) - return fmt.Errorf("failed %v to delete member %d: %v", res.StatusCode, memberID, err2) -} - -func (c *pdClient) DeleteMember(name string) error { - var exist bool - members, err := c.GetMembers() - if err != nil { - return err - } - for _, member := range members.Members { - if member.Name == name { - exist = true - break - } - } - if !exist { - return nil - } - apiURL := fmt.Sprintf("%s/%s/name/%s", c.url, membersPrefix, name) - req, err := http.NewRequest("DELETE", apiURL, nil) - if err != nil { - return err - } - res, err := c.httpClient.Do(req) - if err != nil { - return err - } - defer httputil.DeferClose(res.Body) - if res.StatusCode == http.StatusOK || res.StatusCode == http.StatusNotFound { - return nil - } - err2 := httputil.ReadErrorBody(res.Body) - return fmt.Errorf("failed %v to delete member %s: %v", res.StatusCode, name, err2) -} - -func (c *pdClient) SetStoreLabels(storeID uint64, labels map[string]string) (bool, error) { - apiURL := fmt.Sprintf("%s/%s/%d/label", c.url, storePrefix, storeID) - data, err := json.Marshal(labels) - if err != nil { - return false, err - } - res, err := c.httpClient.Post(apiURL, "application/json", bytes.NewBuffer(data)) - if err != nil { - return false, err - } - defer httputil.DeferClose(res.Body) - if res.StatusCode == http.StatusOK { - return true, nil - } - err2 
:= httputil.ReadErrorBody(res.Body) - return false, fmt.Errorf("failed %v to set store labels: %v", res.StatusCode, err2) -} - -func (c *pdClient) UpdateReplicationConfig(config PDReplicationConfig) error { - apiURL := fmt.Sprintf("%s/%s", c.url, pdReplicationPrefix) - data, err := json.Marshal(config) - if err != nil { - return err - } - res, err := c.httpClient.Post(apiURL, "application/json", bytes.NewBuffer(data)) - if err != nil { - return err - } - defer httputil.DeferClose(res.Body) - if res.StatusCode == http.StatusOK { - return nil - } - err = httputil.ReadErrorBody(res.Body) - return fmt.Errorf("failed %v to update replication: %v", res.StatusCode, err) -} - -func (c *pdClient) BeginEvictLeader(storeID uint64) error { - leaderEvictInfo := getLeaderEvictSchedulerInfo(storeID) - apiURL := fmt.Sprintf("%s/%s", c.url, schedulersPrefix) - data, err := json.Marshal(leaderEvictInfo) - if err != nil { - return err - } - res, err := c.httpClient.Post(apiURL, "application/json", bytes.NewBuffer(data)) - if err != nil { - return err - } - defer httputil.DeferClose(res.Body) - if res.StatusCode == http.StatusOK { - return nil - } - - // pd will return an error with the body contains "scheduler existed" if the scheduler already exists - // this is not the standard response. - // so these lines are just a workaround for now: - // - make a new request to get all schedulers - // - return nil if the scheduler already exists - // - // when PD returns standard json response, we should get rid of this verbose code. - evictLeaderSchedulers, err := c.GetEvictLeaderSchedulers() - if err != nil { - return err - } - for _, s := range evictLeaderSchedulers { - if s == getLeaderEvictSchedulerStr(storeID) { - return nil - } - } - - err2 := httputil.ReadErrorBody(res.Body) - return fmt.Errorf("failed %v to begin evict leader of store:[%d],error: %v", res.StatusCode, storeID, err2) -} - -func (c *pdClient) EndEvictLeader(storeID uint64) error { - sName := getLeaderEvictSchedulerStr(storeID) - apiURL := fmt.Sprintf("%s/%s/%s", c.url, schedulersPrefix, sName) - req, err := http.NewRequest("DELETE", apiURL, nil) - if err != nil { - return err - } - res, err := c.httpClient.Do(req) - if err != nil { - return err - } - defer httputil.DeferClose(res.Body) - if res.StatusCode == http.StatusNotFound { - return nil - } - if res.StatusCode == http.StatusOK { - klog.Infof("call DELETE method: %s success", apiURL) - } else { - err2 := httputil.ReadErrorBody(res.Body) - klog.Errorf("call DELETE method: %s failed,statusCode: %v,error: %v", apiURL, res.StatusCode, err2) - } - - // pd will return an error with the body contains "scheduler not found" if the scheduler is not found - // this is not the standard response. - // so these lines are just a workaround for now: - // - make a new request to get all schedulers - // - return nil if the scheduler is not found - // - // when PD returns standard json response, we should get rid of this verbose code. 
- evictLeaderSchedulers, err := c.GetEvictLeaderSchedulers() - if err != nil { - return err - } - for _, s := range evictLeaderSchedulers { - if s == sName { - return fmt.Errorf("end leader evict scheduler failed,the store:[%d]'s leader evict scheduler is still exist", storeID) - } - } - - return nil -} - -func (c *pdClient) GetEvictLeaderSchedulers() ([]string, error) { - apiURL := fmt.Sprintf("%s/%s", c.url, schedulersPrefix) - body, err := httputil.GetBodyOK(c.httpClient, apiURL) - if err != nil { - return nil, err - } - var schedulers []string - err = json.Unmarshal(body, &schedulers) - if err != nil { - return nil, err - } - var evicts []string - for _, scheduler := range schedulers { - if strings.HasPrefix(scheduler, evictSchedulerLeader) { - evicts = append(evicts, scheduler) - } - } - evictSchedulers, err := c.filterLeaderEvictScheduler(evicts) - if err != nil { - return nil, err - } - return evictSchedulers, nil -} - -func (c *pdClient) GetEvictLeaderSchedulersForStores(storeIDs ...uint64) (map[uint64]string, error) { - schedulers, err := c.GetEvictLeaderSchedulers() - if err != nil { - return nil, err - } - - find := func(id uint64) string { - for _, scheduler := range schedulers { - sName := getLeaderEvictSchedulerStr(id) - if scheduler == sName { - return scheduler - } - } - return "" - } - - result := make(map[uint64]string) - for _, id := range storeIDs { - if scheduler := find(id); scheduler != "" { - result[id] = scheduler - } - } - - return result, nil -} - -// getEvictLeaderSchedulerConfig gets the config of PD scheduler "evict-leader-scheduler" -// It's available since PD 3.1.0. -// In the previous versions, PD API returns 404 and this function will return an error. -func (c *pdClient) getEvictLeaderSchedulerConfig() (*evictLeaderSchedulerConfig, error) { - apiURL := fmt.Sprintf("%s/%s", c.url, evictLeaderSchedulerConfigPrefix) - body, err := httputil.GetBodyOK(c.httpClient, apiURL) - if err != nil { - return nil, err - } - config := &evictLeaderSchedulerConfig{} - err = json.Unmarshal(body, config) - if err != nil { - return nil, err - } - return config, nil -} - -// This method is to make compatible between old pdapi version and versions after 3.1/4.0. -// To get more detail, see: -// - https://github.com/pingcap/tidb-operator/pull/1831 -// - https://github.com/pingcap/pd/issues/2550 -func (c *pdClient) filterLeaderEvictScheduler(evictLeaderSchedulers []string) ([]string, error) { - var schedulerIds []string - if len(evictLeaderSchedulers) == 1 && evictLeaderSchedulers[0] == evictSchedulerLeader { - // If there is only one evcit scehduler entry without store ID postfix. - // We should get the store IDs via scheduler config API and append them - // to provide consistent results. - c, err := c.getEvictLeaderSchedulerConfig() - if err != nil { - return nil, err - } - for k := range c.StoreIDWithRanges { - schedulerIds = append(schedulerIds, fmt.Sprintf("%s-%v", evictSchedulerLeader, k)) - } - } else { - schedulerIds = append(schedulerIds, evictLeaderSchedulers...) 
- } - return schedulerIds, nil -} - -func (c *pdClient) GetRecoveringMark() (bool, error) { - apiURL := fmt.Sprintf("%s/%s", c.url, recoveringMarkPrefix) - body, err := httputil.GetBodyOK(c.httpClient, apiURL) - if err != nil { - return false, err - } - recoveringMark := &RecoveringMark{} - err = json.Unmarshal(body, recoveringMark) - if err != nil { - return false, err - } - return recoveringMark.Mark, nil -} - -func (c *pdClient) GetPDLeader() (*pdpb.Member, error) { - apiURL := fmt.Sprintf("%s/%s", c.url, pdLeaderPrefix) - body, err := httputil.GetBodyOK(c.httpClient, apiURL) - if err != nil { - return nil, err - } - leader := &pdpb.Member{} - err = json.Unmarshal(body, leader) - if err != nil { - return nil, err - } - return leader, nil -} - -func (c *pdClient) TransferPDLeader(memberName string) error { - apiURL := fmt.Sprintf("%s/%s/%s", c.url, pdLeaderTransferPrefix, memberName) - req, err := http.NewRequest("POST", apiURL, nil) - if err != nil { - return err - } - res, err := c.httpClient.Do(req) - if err != nil { - return err - } - defer httputil.DeferClose(res.Body) - if res.StatusCode == http.StatusOK || res.StatusCode == http.StatusNotFound { - return nil - } - err2 := httputil.ReadErrorBody(res.Body) - return fmt.Errorf("failed %v to transfer pd leader to %s,error: %v", res.StatusCode, memberName, err2) -} - -func (c *pdClient) GetAutoscalingPlans(strategy Strategy) ([]Plan, error) { - apiURL := fmt.Sprintf("%s/%s", c.url, autoscalingPrefix) - data, err := json.Marshal(strategy) - if err != nil { - return nil, err - } - body, err := httputil.PostBodyOK(c.httpClient, apiURL, bytes.NewBuffer(data)) - if err != nil { - return nil, err - } - var plans []Plan - err = json.Unmarshal(body, &plans) - if err != nil { - return nil, err - } - return plans, nil -} - -func getLeaderEvictSchedulerInfo(storeID uint64) *schedulerInfo { - return &schedulerInfo{"evict-leader-scheduler", storeID} -} - -func getLeaderEvictSchedulerStr(storeID uint64) string { - return fmt.Sprintf("%s-%d", "evict-leader-scheduler", storeID) -} - // TiKVNotBootstrappedError represents that TiKV cluster is not bootstrapped yet type TiKVNotBootstrappedError struct { s string diff --git a/pkg/pdapi/pdapi_test.go b/pkg/pdapi/pdapi_test.go index 6bf6d9142d3..32326d51d1b 100644 --- a/pkg/pdapi/pdapi_test.go +++ b/pkg/pdapi/pdapi_test.go @@ -14,18 +14,19 @@ package pdapi import ( - "crypto/tls" + "context" "encoding/json" - "fmt" "io" "net/http" "net/http/httptest" "reflect" "testing" + "time" . 
"github.com/onsi/gomega" "github.com/pingcap/kvproto/pkg/metapb" "github.com/pingcap/kvproto/pkg/pdpb" + pd "github.com/tikv/pd/client/http" ) const ( @@ -38,7 +39,7 @@ func getClientServer(h func(http.ResponseWriter, *http.Request)) *httptest.Serve func TestHealth(t *testing.T) { g := NewGomegaWithT(t) - healths := []MemberHealth{ + healths := []pd.MemberHealth{ {Name: "pd1", MemberID: 1, Health: false}, {Name: "pd2", MemberID: 2, Health: true}, {Name: "pd3", MemberID: 3, Health: true}, @@ -49,39 +50,32 @@ func TestHealth(t *testing.T) { tcs := []struct { caseName string path string - method string resp []byte - want []MemberHealth + want []pd.MemberHealth }{{ caseName: "GetHealth", - path: fmt.Sprintf("/%s", healthPrefix), - method: "GET", resp: healthsBytes, want: healths, }} for _, tc := range tcs { svc := getClientServer(func(w http.ResponseWriter, request *http.Request) { - g.Expect(request.Method).To(Equal(tc.method), "check method") - g.Expect(request.URL.Path).To(Equal(tc.path), "check url") - - w.Header().Set("Content-Type", ContentTypeJSON) w.Write(tc.resp) }) defer svc.Close() - pdClient := NewPDClient(svc.URL, DefaultTimeout, &tls.Config{}) - result, err := pdClient.GetHealth() + pdClient := pd.NewClient([]string{svc.URL}) + result, err := pdClient.GetHealth(context.TODO()) g.Expect(err).NotTo(HaveOccurred()) - g.Expect(result).To(Equal(&HealthInfo{healths})) + g.Expect(*result).To(Equal(pd.HealthInfo{Healths: healths})) } } func TestGetConfig(t *testing.T) { g := NewGomegaWithT(t) - config := &PDConfigFromAPI{ - Schedule: &PDScheduleConfig{ - MaxStoreDownTime: "10s", + config := &pd.ServerConfig{ + Schedule: pd.ScheduleConfig{ + MaxStoreDownTime: pd.NewDuration(10 * time.Second), }, } configBytes, err := json.Marshal(config) @@ -89,30 +83,22 @@ func TestGetConfig(t *testing.T) { tcs := []struct { caseName string - path string - method string resp []byte - want *PDConfigFromAPI + want *pd.ServerConfig }{{ caseName: "GetConfig", - path: fmt.Sprintf("/%s", configPrefix), - method: "GET", resp: configBytes, want: config, }} for _, tc := range tcs { svc := getClientServer(func(w http.ResponseWriter, request *http.Request) { - g.Expect(request.Method).To(Equal(tc.method), "check method") - g.Expect(request.URL.Path).To(Equal(tc.path), "check url") - - w.Header().Set("Content-Type", ContentTypeJSON) w.Write(tc.resp) }) defer svc.Close() - pdClient := NewPDClient(svc.URL, DefaultTimeout, &tls.Config{}) - result, err := pdClient.GetConfig() + pdClient := pd.NewClient([]string{svc.URL}) + result, err := pdClient.GetConfig(context.Background()) g.Expect(err).NotTo(HaveOccurred()) g.Expect(result).To(Equal(config)) } @@ -127,30 +113,22 @@ func TestGetCluster(t *testing.T) { tcs := []struct { caseName string - path string - method string resp []byte want *metapb.Cluster }{{ caseName: "GetCluster", - path: fmt.Sprintf("/%s", clusterIDPrefix), - method: "GET", resp: clusterBytes, want: cluster, }} for _, tc := range tcs { svc := getClientServer(func(w http.ResponseWriter, request *http.Request) { - g.Expect(request.Method).To(Equal(tc.method), "check method") - g.Expect(request.URL.Path).To(Equal(tc.path), "check url") - - w.Header().Set("Content-Type", ContentTypeJSON) w.Write(tc.resp) }) defer svc.Close() - pdClient := NewPDClient(svc.URL, DefaultTimeout, &tls.Config{}) - result, err := pdClient.GetCluster() + pdClient := pd.NewClient([]string{svc.URL}) + result, err := pdClient.GetCluster(context.TODO()) g.Expect(err).NotTo(HaveOccurred()) g.Expect(result).To(Equal(cluster)) } @@ -162,7 
+140,7 @@ func TestGetMembers(t *testing.T) { member1 := &pdpb.Member{Name: "testMember1", MemberId: uint64(1)} member2 := &pdpb.Member{Name: "testMember2", MemberId: uint64(2)} - members := &MembersInfo{ + members := &pd.MembersInfo{ Members: []*pdpb.Member{ member1, member2, @@ -177,15 +155,11 @@ func TestGetMembers(t *testing.T) { tcs := []struct { caseName string - path string - method string resp []byte - want *MembersInfo + want *pd.MembersInfo }{ { caseName: "GetMembers", - path: fmt.Sprintf("/%s", membersPrefix), - method: "GET", resp: membersBytes, want: members, }, @@ -193,16 +167,12 @@ func TestGetMembers(t *testing.T) { for _, tc := range tcs { svc := getClientServer(func(w http.ResponseWriter, request *http.Request) { - g.Expect(request.Method).To(Equal(tc.method), "check method") - g.Expect(request.URL.Path).To(Equal(tc.path), "check url") - - w.Header().Set("Content-Type", ContentTypeJSON) w.Write(tc.resp) }) defer svc.Close() - pdClient := NewPDClient(svc.URL, DefaultTimeout, &tls.Config{}) - result, err := pdClient.GetMembers() + pdClient := pd.NewClient([]string{svc.URL}) + result, err := pdClient.GetMembers(context.TODO()) g.Expect(err).NotTo(HaveOccurred()) g.Expect(result).To(Equal(members)) } @@ -210,17 +180,15 @@ func TestGetMembers(t *testing.T) { func TestGetStores(t *testing.T) { g := NewGomegaWithT(t) - store1 := &StoreInfo{ - Store: &MetaStore{Store: &metapb.Store{Id: uint64(1), State: metapb.StoreState_Up}}, - Status: &StoreStatus{}, + store1 := pd.StoreInfo{ + Store: pd.MetaStore{ID: 1, State: int64(metapb.StoreState_Up)}, } - store2 := &StoreInfo{ - Store: &MetaStore{Store: &metapb.Store{Id: uint64(2), State: metapb.StoreState_Up}}, - Status: &StoreStatus{}, + store2 := pd.StoreInfo{ + Store: pd.MetaStore{ID: 2, State: int64(metapb.StoreState_Up)}, } - stores := &StoresInfo{ + stores := &pd.StoresInfo{ Count: 2, - Stores: []*StoreInfo{ + Stores: []pd.StoreInfo{ store1, store2, }, @@ -231,30 +199,22 @@ func TestGetStores(t *testing.T) { tcs := []struct { caseName string - path string - method string resp []byte - want *StoresInfo + want *pd.StoresInfo }{{ caseName: "GetStores", - path: fmt.Sprintf("/%s", storesPrefix), - method: "GET", resp: storesBytes, want: stores, }} for _, tc := range tcs { svc := getClientServer(func(w http.ResponseWriter, request *http.Request) { - g.Expect(request.Method).To(Equal(tc.method), "check method") - g.Expect(request.URL.Path).To(Equal(tc.path), "check url") - - w.Header().Set("Content-Type", ContentTypeJSON) w.Write(tc.resp) }) defer svc.Close() - pdClient := NewPDClient(svc.URL, DefaultTimeout, &tls.Config{}) - result, err := pdClient.GetStores() + pdClient := pd.NewClient([]string{svc.URL}) + result, err := pdClient.GetStores(context.TODO()) g.Expect(err).NotTo(HaveOccurred()) g.Expect(result).To(Equal(stores)) } @@ -263,10 +223,10 @@ func TestGetStores(t *testing.T) { func TestGetStore(t *testing.T) { g := NewGomegaWithT(t) - id := uint64(1) - store := &StoreInfo{ - Store: &MetaStore{Store: &metapb.Store{Id: id, State: metapb.StoreState_Up}}, - Status: &StoreStatus{}, + id := int64(1) + store := &pd.StoreInfo{ + Store: pd.MetaStore{ID: id, State: int64(metapb.StoreState_Up)}, + Status: pd.StoreStatus{}, } storeBytes, err := json.Marshal(store) @@ -274,15 +234,11 @@ func TestGetStore(t *testing.T) { tcs := []struct { caseName string - path string - method string - id uint64 + id int64 resp []byte - want *StoreInfo + want *pd.StoreInfo }{{ caseName: "GetStore", - path: fmt.Sprintf("/%s/%d", storePrefix, id), - method: 
"GET", id: id, resp: storeBytes, want: store, @@ -290,16 +246,12 @@ func TestGetStore(t *testing.T) { for _, tc := range tcs { svc := getClientServer(func(w http.ResponseWriter, request *http.Request) { - g.Expect(request.Method).To(Equal(tc.method), "test method") - g.Expect(request.URL.Path).To(Equal(tc.path), "test url") - - w.Header().Set("Content-Type", ContentTypeJSON) w.Write(tc.resp) }) defer svc.Close() - pdClient := NewPDClient(svc.URL, DefaultTimeout, &tls.Config{}) - result, err := pdClient.GetStore(tc.id) + pdClient := pd.NewClient([]string{svc.URL}) + result, err := pdClient.GetStore(context.TODO(), uint64(tc.id)) g.Expect(err).NotTo(HaveOccurred()) g.Expect(result).To(Equal(store)) } @@ -311,34 +263,24 @@ func TestSetStoreLabels(t *testing.T) { labels := map[string]string{"testkey": "testvalue"} tcs := []struct { caseName string - path string - method string want bool }{{ caseName: "success_SetStoreLabels", - path: fmt.Sprintf("/%s/%d/label", storePrefix, id), - method: "POST", want: true, }, { caseName: "failed_SetStoreLabels", - path: fmt.Sprintf("/%s/%d/label", storePrefix, id), - method: "POST", want: false, }, } for _, tc := range tcs { svc := getClientServer(func(w http.ResponseWriter, request *http.Request) { - g.Expect(request.Method).To(Equal(tc.method), "check method") - g.Expect(request.URL.Path).To(Equal(tc.path), "check url") - labels := &map[string]string{} err := readJSON(request.Body, labels) g.Expect(err).NotTo(HaveOccurred()) g.Expect(labels).To(Equal(labels), "check labels") - w.Header().Set("Content-Type", ContentTypeJSON) if tc.want { w.WriteHeader(http.StatusOK) } else { @@ -347,88 +289,42 @@ func TestSetStoreLabels(t *testing.T) { }) defer svc.Close() - pdClient := NewPDClient(svc.URL, DefaultTimeout, &tls.Config{}) - result, _ := pdClient.SetStoreLabels(id, labels) + pdClient := pd.NewClient([]string{svc.URL}) + result, _ := pdClient.SetStoreLabels(context.TODO(), id, labels) g.Expect(result).To(Equal(tc.want)) } } func TestDeleteMember(t *testing.T) { g := NewGomegaWithT(t) - name := "testMember" - member := &pdpb.Member{Name: name, MemberId: uint64(1)} - membersExist := &MembersInfo{ - Members: []*pdpb.Member{ - member, - }, - Leader: member, - EtcdLeader: member, - } - membersExistBytes, err := json.Marshal(membersExist) - g.Expect(err).NotTo(HaveOccurred()) - - membersNotExist := &MembersInfo{ - Members: []*pdpb.Member{}, - } - membersNotExistBytes, err := json.Marshal(membersNotExist) - g.Expect(err).NotTo(HaveOccurred()) tcs := []struct { - caseName string - prePath string - preMethod string - preResp []byte - exist bool - path string - method string - want bool + caseName string + exist bool + want bool }{{ - caseName: "success_DeleteMember", - prePath: fmt.Sprintf("/%s", membersPrefix), - preMethod: "GET", - preResp: membersExistBytes, - exist: true, - path: fmt.Sprintf("/%s/name/%s", membersPrefix, name), - method: "DELETE", - want: true, + caseName: "success_DeleteMember", + exist: true, + want: true, }, { - caseName: "failed_DeleteMember", - prePath: fmt.Sprintf("/%s", membersPrefix), - preMethod: "GET", - preResp: membersExistBytes, - exist: true, - path: fmt.Sprintf("/%s/name/%s", membersPrefix, name), - method: "DELETE", - want: false, + caseName: "failed_DeleteMember", + exist: true, + want: false, }, { - caseName: "delete_not_exist_member", - prePath: fmt.Sprintf("/%s", membersPrefix), - preMethod: "GET", - preResp: membersNotExistBytes, - exist: false, - path: fmt.Sprintf("/%s/name/%s", membersPrefix, name), - method: "DELETE", - 
want: true, + caseName: "delete_not_exist_member", + exist: false, + want: true, }, } for _, tc := range tcs { - count := 1 svc := getClientServer(func(w http.ResponseWriter, request *http.Request) { - if count == 1 { - g.Expect(request.Method).To(Equal(tc.preMethod), "check method") - g.Expect(request.URL.Path).To(Equal(tc.prePath), "check url") - w.Header().Set("Content-Type", ContentTypeJSON) - w.WriteHeader(http.StatusOK) - w.Write(tc.preResp) - count++ + if !tc.exist { + w.WriteHeader(http.StatusInternalServerError) return } g.Expect(tc.exist).To(BeTrue()) - g.Expect(request.Method).To(Equal(tc.method), "check method") - g.Expect(request.URL.Path).To(Equal(tc.path), "check url") - w.Header().Set("Content-Type", ContentTypeJSON) if tc.want { w.WriteHeader(http.StatusOK) } else { @@ -437,9 +333,9 @@ func TestDeleteMember(t *testing.T) { }) defer svc.Close() - pdClient := NewPDClient(svc.URL, DefaultTimeout, &tls.Config{}) - err := pdClient.DeleteMember(name) - if tc.want { + pdClient := pd.NewClient([]string{svc.URL}) + err := pdClient.DeleteMember(context.TODO(), "testMember") + if tc.want && tc.exist { g.Expect(err).NotTo(HaveOccurred(), "check result") } else { g.Expect(err).To(HaveOccurred(), "check result") @@ -449,80 +345,33 @@ func TestDeleteMember(t *testing.T) { func TestDeleteMemberByID(t *testing.T) { g := NewGomegaWithT(t) - id := uint64(1) - member := &pdpb.Member{Name: "test", MemberId: id} - membersExist := &MembersInfo{ - Members: []*pdpb.Member{ - member, - }, - Leader: member, - EtcdLeader: member, - } - membersExistBytes, err := json.Marshal(membersExist) - g.Expect(err).NotTo(HaveOccurred()) - - membersNotExist := &MembersInfo{ - Members: []*pdpb.Member{}, - } - membersNotExistBytes, err := json.Marshal(membersNotExist) - g.Expect(err).NotTo(HaveOccurred()) tcs := []struct { - caseName string - prePath string - preMethod string - preResp []byte - exist bool - path string - method string - want bool + caseName string + preResp []byte + exist bool + want bool }{{ - caseName: "success_DeleteMemberByID", - prePath: fmt.Sprintf("/%s", membersPrefix), - preMethod: "GET", - preResp: membersExistBytes, - exist: true, - path: fmt.Sprintf("/%s/id/%d", membersPrefix, id), - method: "DELETE", - want: true, + caseName: "success_DeleteMemberByID", + exist: true, + want: true, }, { - caseName: "failed_DeleteMemberByID", - prePath: fmt.Sprintf("/%s", membersPrefix), - preMethod: "GET", - preResp: membersExistBytes, - exist: true, - path: fmt.Sprintf("/%s/id/%d", membersPrefix, id), - method: "DELETE", - want: false, + caseName: "failed_DeleteMemberByID", + exist: true, + want: false, }, { - caseName: "delete_not_exit_member", - prePath: fmt.Sprintf("/%s", membersPrefix), - preMethod: "GET", - preResp: membersNotExistBytes, - exist: false, - path: fmt.Sprintf("/%s/id/%d", membersPrefix, id), - method: "DELETE", - want: true, + caseName: "delete_not_exit_member", + exist: false, + want: true, }, } for _, tc := range tcs { - count := 1 svc := getClientServer(func(w http.ResponseWriter, request *http.Request) { - if count == 1 { - g.Expect(request.Method).To(Equal(tc.preMethod), "check method") - g.Expect(request.URL.Path).To(Equal(tc.prePath), "check url") - w.Header().Set("Content-Type", ContentTypeJSON) - w.WriteHeader(http.StatusOK) - w.Write(tc.preResp) - count++ + if !tc.exist { + w.WriteHeader(http.StatusInternalServerError) return } - - g.Expect(tc.exist).To(BeTrue()) - g.Expect(request.Method).To(Equal(tc.method), "check method") - g.Expect(request.URL.Path).To(Equal(tc.path), 
"check url") - w.Header().Set("Content-Type", ContentTypeJSON) if tc.want { w.WriteHeader(http.StatusOK) } else { @@ -531,9 +380,9 @@ func TestDeleteMemberByID(t *testing.T) { }) defer svc.Close() - pdClient := NewPDClient(svc.URL, DefaultTimeout, &tls.Config{}) - err := pdClient.DeleteMemberByID(id) - if tc.want { + pdClient := pd.NewClient([]string{svc.URL}) + err := pdClient.DeleteMemberByID(context.TODO(), 1) + if tc.want && tc.exist { g.Expect(err).NotTo(HaveOccurred(), "check result") } else { g.Expect(err).To(HaveOccurred(), "check result") @@ -543,78 +392,34 @@ func TestDeleteMemberByID(t *testing.T) { func TestDeleteStore(t *testing.T) { g := NewGomegaWithT(t) - storeID := uint64(1) - store := &StoreInfo{ - Store: &MetaStore{Store: &metapb.Store{Id: storeID, State: metapb.StoreState_Up}}, - Status: &StoreStatus{}, - } - stores := &StoresInfo{ - Count: 1, - Stores: []*StoreInfo{ - store, - }, - } - - storesBytes, err := json.Marshal(stores) - g.Expect(err).NotTo(HaveOccurred()) tcs := []struct { - caseName string - prePath string - preMethod string - preResp []byte - exist bool - path string - method string - want bool + caseName string + preResp []byte + exist bool + want bool }{{ - caseName: "success_DeleteStore", - prePath: fmt.Sprintf("/%s", storesPrefix), - preMethod: "GET", - preResp: storesBytes, - exist: true, - path: fmt.Sprintf("/%s/%d", storePrefix, storeID), - method: "DELETE", - want: true, + caseName: "success_DeleteStore", + exist: true, + want: true, }, { - caseName: "failed_DeleteStore", - prePath: fmt.Sprintf("/%s", storesPrefix), - preMethod: "GET", - preResp: storesBytes, - exist: true, - path: fmt.Sprintf("/%s/%d", storePrefix, storeID), - method: "DELETE", - want: false, + caseName: "failed_DeleteStore", + exist: true, + want: false, }, { - caseName: "delete_not_exist_store", - prePath: fmt.Sprintf("/%s", storesPrefix), - preMethod: "GET", - preResp: storesBytes, - exist: true, - path: fmt.Sprintf("/%s/%d", storePrefix, storeID), - method: "DELETE", - want: true, + caseName: "delete_not_exist_store", + exist: true, + want: true, }, } for _, tc := range tcs { - count := 1 svc := getClientServer(func(w http.ResponseWriter, request *http.Request) { - if count == 1 { - g.Expect(request.Method).To(Equal(tc.preMethod), "check method") - g.Expect(request.URL.Path).To(Equal(tc.prePath), "check url") - w.Header().Set("Content-Type", ContentTypeJSON) - w.WriteHeader(http.StatusOK) - w.Write(tc.preResp) - count++ + if !tc.exist { + w.WriteHeader(http.StatusInternalServerError) return } - g.Expect(tc.exist).To(BeTrue()) - g.Expect(request.Method).To(Equal(tc.method), "check method") - g.Expect(request.URL.Path).To(Equal(tc.path), "check url") - - w.Header().Set("Content-Type", ContentTypeJSON) if tc.want { w.WriteHeader(http.StatusOK) } else { @@ -623,9 +428,9 @@ func TestDeleteStore(t *testing.T) { }) defer svc.Close() - pdClient := NewPDClient(svc.URL, DefaultTimeout, &tls.Config{}) - err := pdClient.DeleteStore(storeID) - if tc.want { + pdClient := pd.NewClient([]string{svc.URL}) + err := pdClient.DeleteStore(context.TODO(), 1) + if tc.want && tc.exist { g.Expect(err).NotTo(HaveOccurred(), "check result") } else { g.Expect(err).To(HaveOccurred(), "check result") @@ -636,9 +441,6 @@ func TestDeleteStore(t *testing.T) { func TestGetEvictLeaderSchedulersForStores(t *testing.T) { g := NewGomegaWithT(t) - expectHTTPMethod := "GET" - expectURLPath := fmt.Sprintf("/%s", schedulersPrefix) - type testcase struct { name string @@ -653,9 +455,9 @@ func 
TestGetEvictLeaderSchedulersForStores(t *testing.T) { storeIDs: []uint64{1, 7, 8}, resp: []byte(` [ - "evict-leader-scheduler-1", - "evict-leader-scheduler-7", - "evict-leader-scheduler-8", + "evict-leader-scheduler-1", + "evict-leader-scheduler-7", + "evict-leader-scheduler-8", "evict-leader-scheduler-9" ] `), @@ -674,9 +476,9 @@ func TestGetEvictLeaderSchedulersForStores(t *testing.T) { storeIDs: []uint64{1, 7, 10}, resp: []byte(` [ - "evict-leader-scheduler-1", - "evict-leader-scheduler-7", - "evict-leader-scheduler-8", + "evict-leader-scheduler-1", + "evict-leader-scheduler-7", + "evict-leader-scheduler-8", "evict-leader-scheduler-9" ] `), @@ -695,8 +497,8 @@ func TestGetEvictLeaderSchedulersForStores(t *testing.T) { storeIDs: []uint64{1, 7, 10}, resp: []byte(` [ - "evict-leader-scheduler-2", - "evict-leader-scheduler-8", + "evict-leader-scheduler-2", + "evict-leader-scheduler-8", "evict-leader-scheduler-9" ] `), @@ -711,17 +513,14 @@ func TestGetEvictLeaderSchedulersForStores(t *testing.T) { t.Logf("test case: %s", tc.name) svc := getClientServer(func(w http.ResponseWriter, request *http.Request) { - g.Expect(request.Method).To(Equal(expectHTTPMethod), "check method") - g.Expect(request.URL.Path).To(Equal(expectURLPath), "check url") - w.Header().Set("Content-Type", ContentTypeJSON) w.WriteHeader(http.StatusOK) w.Write(tc.resp) }) defer svc.Close() - pdClient := NewPDClient(svc.URL, DefaultTimeout, &tls.Config{}) - schedulers, err := pdClient.GetEvictLeaderSchedulersForStores(tc.storeIDs...) + pdClient := pd.NewClient([]string{svc.URL}) + schedulers, err := GetEvictLeaderSchedulersForStores(context.TODO(), pdClient, tc.storeIDs...) tc.expect(schedulers, err) } } @@ -760,77 +559,17 @@ func TestGeneric(t *testing.T) { args []reflect.Value resp []byte statusCode int - wantMethod string - wantPath string wantQuery string checkResult func(t *testing.T, results []reflect.Value) }{ - { - name: "GetTombStoneStores", - method: "GetTombStoneStores", - resp: []byte(`{ - "count": 1, - "stores": [ - { - "store": { - }, - "status": { - } - } - ] -} -`), - statusCode: http.StatusOK, - wantMethod: "GET", - wantPath: fmt.Sprintf("/%s", storesPrefix), - wantQuery: fmt.Sprintf("state=%d", metapb.StoreState_Tombstone), - checkResult: checkNoError, - }, { name: "UpdateReplicationConfig", method: "UpdateReplicationConfig", args: []reflect.Value{ - reflect.ValueOf(PDReplicationConfig{}), + reflect.ValueOf(pd.ReplicationConfig{}), }, resp: []byte(``), statusCode: http.StatusOK, - wantMethod: "POST", - wantPath: fmt.Sprintf("/%s", pdReplicationPrefix), - checkResult: checkNoError, - }, - { - name: "BeginEvictLeader", - method: "BeginEvictLeader", - args: []reflect.Value{ - reflect.ValueOf(uint64(1)), - }, - statusCode: http.StatusOK, - wantMethod: "POST", - wantPath: fmt.Sprintf("/%s", schedulersPrefix), - checkResult: checkNoError, - }, - { - name: "EndEvictLeader", - method: "EndEvictLeader", - args: []reflect.Value{ - reflect.ValueOf(uint64(1)), - }, - statusCode: http.StatusNotFound, - wantMethod: "DELETE", - wantPath: fmt.Sprintf("/%s/evict-leader-scheduler-1", schedulersPrefix), - checkResult: checkNoError, - }, - { - name: "GetEvictLeaderSchedulers", - method: "GetEvictLeaderSchedulers", - resp: []byte(` -[ - "evict-leader-scheduler-1" -] -`), - statusCode: http.StatusOK, - wantMethod: "GET", - wantPath: fmt.Sprintf("/%s", schedulersPrefix), checkResult: checkNoError, }, // TODO test the fix https://github.com/pingcap/tidb-operator/pull/2809 @@ -848,8 +587,6 @@ func TestGeneric(t *testing.T) { } `), 
			statusCode: http.StatusOK,
-			wantMethod: "GET",
-			wantPath:   fmt.Sprintf("/%s", pdLeaderPrefix),
			checkResult: checkNoError,
		},
		{
@@ -862,8 +599,6 @@ func TestGeneric(t *testing.T) {
 `),
			statusCode: http.StatusOK,
-			wantMethod: "POST",
-			wantPath:   fmt.Sprintf("/%s/%s", pdLeaderTransferPrefix, "foo"),
			checkResult: checkNoError,
		},
	}

@@ -872,8 +607,6 @@ func TestGeneric(t *testing.T) {
		t.Run(tt.name, func(t *testing.T) {
			g := NewGomegaWithT(t)
			server := getClientServer(func(w http.ResponseWriter, request *http.Request) {
-				g.Expect(request.Method).To(Equal(tt.wantMethod), "check method")
-				g.Expect(request.URL.Path).To(Equal(tt.wantPath), "check path")
				g.Expect(request.URL.RawQuery).To(Equal(tt.wantQuery), "check query")

				w.Header().Set("Content-Type", ContentTypeJSON)
@@ -882,9 +615,10 @@ func TestGeneric(t *testing.T) {
			})
			defer server.Close()

-			pdClient := NewPDClient(server.URL, DefaultTimeout, &tls.Config{})
+			pdClient := pd.NewClient([]string{server.URL})
			args := []reflect.Value{
				reflect.ValueOf(pdClient),
+				reflect.ValueOf(context.TODO()),
			}
			args = append(args, tt.args...)
			method, ok := reflect.TypeOf(pdClient).MethodByName(tt.method)
diff --git a/pkg/pdapi/pdutil.go b/pkg/pdapi/pdutil.go
index c16ee6fce38..65b1131f004 100644
--- a/pkg/pdapi/pdutil.go
+++ b/pkg/pdapi/pdutil.go
@@ -14,26 +14,27 @@ package pdapi

 import (
+	"context"
 	"fmt"
+	"strings"
+
+	"github.com/pingcap/errors"
 	"github.com/pingcap/tidb-operator/pkg/apis/pingcap/v1alpha1"
+	pd "github.com/tikv/pd/client/http"
 )

 // IsTiKVStable checks if cluster is stable by quering PD and checking status of all stores
 // we check PD directly instead of checking the same information already available in the CR status to get the most up to data
 // stores status in PD could be up to 20 seconds stale https://docs.pingcap.com/tidb/stable/tidb-scheduling#information-collection
-func IsTiKVStable(pdClient PDClient) string {
-	storesInfo, err := pdClient.GetStores()
+func IsTiKVStable(pdClient pd.Client) string {
+	storesInfo, err := pdClient.GetStores(context.TODO())
 	if err != nil {
 		return fmt.Sprintf("can't access PD: %s", err)
 	}
 	for _, store := range storesInfo.Stores {
-		if store.Store == nil {
-			return "missing data for one of its stores"
-		}
 		if store.Store.StateName != v1alpha1.TiKVStateUp && store.Store.StateName != v1alpha1.TiKVStateTombstone {
-			return fmt.Sprintf("Strore %d is not up: %s", store.Store.GetId(), store.Store.StateName)
+			return fmt.Sprintf("Store %d is not up: %s", store.Store.ID, store.Store.StateName)
 		}
 	}
@@ -41,8 +42,8 @@ func IsTiKVStable(pdClient PDClient) string {
 }

 // IsPDStable queries PD to verify that there are quorum + 1 healthy PDs
-func IsPDStable(pdClient PDClient) string {
-	healthInfo, err := pdClient.GetHealth()
+func IsPDStable(pdClient pd.Client) string {
+	healthInfo, err := pdClient.GetHealth(context.TODO())
 	if err != nil {
 		return fmt.Sprintf("can't access PD: %s", err)
 	}
@@ -62,3 +63,126 @@ func IsPDStable(pdClient PDClient) string {
 		return fmt.Sprintf("Only %d out of %d PDs are healthy", healthy, total)
 	}
 }
+
+func BeginEvictLeader(ctx context.Context, pdClient pd.Client, storeID uint64) error {
+	err := pdClient.CreateScheduler(ctx, "evict-leader-scheduler", storeID)
+	if err != nil {
+		return err
+	}
+
+	// PD returns an error whose body contains "scheduler existed" if the scheduler already exists;
+	// this is not a standard JSON response.
+	// So these lines are just a workaround for now:
+	// - make a new request to get all schedulers
+	// - return nil if the scheduler already exists
+	//
+	// Once PD returns a standard JSON response, we should get rid of this verbose code.
+	//evictLeaderSchedulers, err := pdClient.GetEvictLeaderSchedulers()
+	evictLeaderSchedulers, err := GetEvictLeaderSchedulers(ctx, pdClient)
+	if err != nil {
+		return err
+	}
+	for _, scheduler := range evictLeaderSchedulers {
+		if scheduler == getLeaderEvictSchedulerStr(storeID) {
+			return nil
+		}
+	}
+	return errors.New("scheduler does not exist")
+}
+
+func EndEvictLeader(ctx context.Context, pdClient pd.Client, storeID uint64) error {
+	sName := getLeaderEvictSchedulerStr(storeID)
+	// delete the evict-leader scheduler for this store
+	err := pdClient.DeleteScheduler(ctx, "evict-leader-scheduler", storeID)
+	if err != nil {
+		return err
+	}
+
+	// PD returns an error whose body contains "scheduler not found" if the scheduler is not found;
+	// this is not a standard JSON response.
+	// So these lines are just a workaround for now:
+	// - make a new request to get all schedulers
+	// - return nil if the scheduler is not found
+	//
+	// Once PD returns a standard JSON response, we should get rid of this verbose code.
+	evictLeaderSchedulers, err := GetEvictLeaderSchedulers(ctx, pdClient)
+	if err != nil {
+		return err
+	}
+	for _, s := range evictLeaderSchedulers {
+		if s == sName {
+			return fmt.Errorf("failed to end leader evict scheduler: the store [%d]'s leader evict scheduler still exists", storeID)
+		}
+	}
+
+	return nil
+}
+
+func getLeaderEvictSchedulerStr(storeID uint64) string {
+	return fmt.Sprintf("%s-%d", "evict-leader-scheduler", storeID)
+}
+
+func GetEvictLeaderSchedulers(ctx context.Context, pdClient pd.Client) ([]string, error) {
+	schedulers, err := pdClient.GetSchedulers(ctx)
+	if err != nil {
+		return nil, err
+	}
+
+	var evicts []string
+	for _, scheduler := range schedulers {
+		if strings.HasPrefix(scheduler, evictSchedulerLeader) {
+			evicts = append(evicts, scheduler)
+		}
+	}
+
+	evictSchedulers, err := filterLeaderEvictScheduler(pdClient, evicts)
+	if err != nil {
+		return nil, err
+	}
+	return evictSchedulers, nil
+}
+
+func filterLeaderEvictScheduler(pdClient pd.Client, evictLeaderSchedulers []string) ([]string, error) {
+	var schedulerIds []string
+	if len(evictLeaderSchedulers) == 1 && evictLeaderSchedulers[0] == evictSchedulerLeader {
+		// If there is only one evict scheduler entry without a store ID suffix,
+		// we should get the store IDs via the scheduler config API and append them
+		// to provide consistent results.
+		c, err := pdClient.GetEvictLeaderSchedulerConfig(context.TODO())
+		if err != nil {
+			return nil, err
+		}
+		for k := range c.StoreIDWithRanges {
+			schedulerIds = append(schedulerIds, fmt.Sprintf("%s-%v", evictSchedulerLeader, k))
+		}
+	} else {
+		schedulerIds = append(schedulerIds, evictLeaderSchedulers...)
+ } + return schedulerIds, nil +} + +func GetEvictLeaderSchedulersForStores(ctx context.Context, pdClient pd.Client, storeIDs ...uint64) (map[uint64]string, error) { + schedulers, err := pdClient.GetSchedulers(ctx) + if err != nil { + return nil, err + } + + find := func(id uint64) string { + for _, scheduler := range schedulers { + sName := getLeaderEvictSchedulerStr(id) + if scheduler == sName { + return scheduler + } + } + return "" + } + + result := make(map[uint64]string) + for _, id := range storeIDs { + if scheduler := find(id); scheduler != "" { + result[id] = scheduler + } + } + + return result, nil +} diff --git a/pkg/util/util.go b/pkg/util/util.go index bafead22a0f..277f703f894 100644 --- a/pkg/util/util.go +++ b/pkg/util/util.go @@ -23,11 +23,12 @@ import ( "strings" "github.com/pingcap/advanced-statefulset/client/apis/apps/v1/helper" - "github.com/pingcap/kvproto/pkg/metapb" "github.com/pingcap/tidb-operator/pkg/apis/label" "github.com/pingcap/tidb-operator/pkg/apis/pingcap/v1alpha1" "github.com/pingcap/tidb-operator/pkg/features" "github.com/sethvargo/go-password/password" + pd "github.com/tikv/pd/client/http" + apps "k8s.io/api/apps/v1" appsv1 "k8s.io/api/apps/v1" corev1 "k8s.io/api/core/v1" @@ -374,7 +375,7 @@ func VolumeClaimTemplate(r corev1.ResourceRequirements, metaName string, storage } } -func MatchLabelFromStoreLabels(storeLabels []*metapb.StoreLabel, componentLabel string) bool { +func MatchLabelFromStoreLabels(storeLabels []pd.StoreLabel, componentLabel string) bool { storeKind := label.TiKVLabelVal for _, storeLabel := range storeLabels { if storeLabel.Key == "engine" && storeLabel.Value == label.TiFlashLabelVal { diff --git a/tests/e2e/tidbcluster/tidbcluster.go b/tests/e2e/tidbcluster/tidbcluster.go index 9030c731619..3077c321977 100644 --- a/tests/e2e/tidbcluster/tidbcluster.go +++ b/tests/e2e/tidbcluster/tidbcluster.go @@ -16,6 +16,7 @@ package tidbcluster import ( "context" "fmt" + "github.com/pingcap/tidb-operator/pkg/pdapi" _ "net/http/pprof" "strconv" "strings" @@ -396,7 +397,7 @@ var _ = ginkgo.Describe("TiDBCluster", func() { framework.ExpectNoError(err, "failed to create proxied PD client") defer cancel() - evictLeaderSchedulers, err := pdClient.GetEvictLeaderSchedulers() + evictLeaderSchedulers, err := pdapi.GetEvictLeaderSchedulers(context.TODO(), pdClient) framework.ExpectNoError(err, "failed to get EvictLeader") res := utiltc.MustPDHasScheduler(evictLeaderSchedulers, "evict-leader-scheduler") framework.ExpectEqual(res, false) @@ -1319,7 +1320,7 @@ var _ = ginkgo.Describe("TiDBCluster", func() { pdClient, cancel, err := proxiedpdclient.NewProxiedPDClient(secretLister, fw, ns, tcName, true) framework.ExpectNoError(err, "create pdClient error") defer cancel() - storeInfo, err := pdClient.GetStores() + storeInfo, err := pdClient.GetStores(context.TODO()) if err != nil { log.Logf("failed to get stores, %v", err) } @@ -1547,7 +1548,7 @@ var _ = ginkgo.Describe("TiDBCluster", func() { pdClient, cancel, err := proxiedpdclient.NewProxiedPDClient(secretLister, fw, ns, originTc.Name, false) framework.ExpectNoError(err, "create pdClient error") defer cancel() - storeInfo, err := pdClient.GetStores() + storeInfo, err := pdClient.GetStores(context.TODO()) if err != nil { log.Logf("failed to get stores, %v", err) } @@ -1605,7 +1606,7 @@ var _ = ginkgo.Describe("TiDBCluster", func() { pdClient, cancel, err := proxiedpdclient.NewProxiedPDClient(secretLister, fw, ns, originTc.Name, false) framework.ExpectNoError(err, "create pdClient error") defer cancel() - 
storeInfo, err := pdClient.GetStores() + storeInfo, err := pdClient.GetStores(context.TODO()) if err != nil { log.Logf("failed to get stores, %v", err) } @@ -2383,7 +2384,7 @@ var _ = ginkgo.Describe("TiDBCluster", func() { framework.ExpectNoError(err, "fail to create proxied pdClient") defer cancel() err = wait.Poll(5*time.Second, 3*time.Minute, func() (bool, error) { - schedulers, err := pdClient.GetEvictLeaderSchedulers() + schedulers, err := pdapi.GetEvictLeaderSchedulers(context.TODO(), pdClient) framework.ExpectNoError(err, "failed to get evict leader schedulers") if len(schedulers) != 0 { log.Logf("there are %d evict leader left: %v", len(schedulers), schedulers) @@ -2782,7 +2783,7 @@ var _ = ginkgo.Describe("TiDBCluster", func() { defer cancel() _ = wait.PollImmediate(15*time.Second, 3*time.Minute, func() (bool, error) { - storesInfo, err := pdClient.GetStores() + storesInfo, err := pdClient.GetStores(context.TODO()) framework.ExpectNoError(err, "get stores info error") framework.ExpectEqual(storesInfo.Count, 3, "Expect number of stores is 3") for _, store := range storesInfo.Stores { diff --git a/tests/e2e/util/proxiedpdclient/proxiedpdclient.go b/tests/e2e/util/proxiedpdclient/proxiedpdclient.go index 7c5187f1bd4..7aced561a58 100644 --- a/tests/e2e/util/proxiedpdclient/proxiedpdclient.go +++ b/tests/e2e/util/proxiedpdclient/proxiedpdclient.go @@ -17,6 +17,7 @@ import ( "context" "crypto/tls" "fmt" + pd "github.com/tikv/pd/client/http" "net/url" "github.com/pingcap/tidb-operator/pkg/apis/pingcap/v1alpha1" @@ -38,7 +39,7 @@ import ( // log.Fatal(err) // } // defer cancel() -func NewProxiedPDClient(secretLister corelisterv1.SecretLister, fw utilportforward.PortForward, namespace string, tcName string, tlsEnabled bool) (pdapi.PDClient, context.CancelFunc, error) { +func NewProxiedPDClient(secretLister corelisterv1.SecretLister, fw utilportforward.PortForward, namespace string, tcName string, tlsEnabled bool) (pd.Client, context.CancelFunc, error) { var tlsConfig *tls.Config var err error scheme := "http" @@ -57,9 +58,9 @@ func NewProxiedPDClient(secretLister corelisterv1.SecretLister, fw utilportforwa Scheme: scheme, Host: fmt.Sprintf("%s:%d", localHost, localPort), } - return pdapi.NewPDClient(u.String(), pdapi.DefaultTimeout, tlsConfig), cancel, nil + return pd.NewClient([]string{u.String()}, pd.WithTLSConfig(tlsConfig)), cancel, nil } -func NewProxiedPDClientFromTidbCluster(fw utilportforward.PortForward, secretLister corelisterv1.SecretLister, tc *v1alpha1.TidbCluster) (pdapi.PDClient, context.CancelFunc, error) { +func NewProxiedPDClientFromTidbCluster(fw utilportforward.PortForward, secretLister corelisterv1.SecretLister, tc *v1alpha1.TidbCluster) (pd.Client, context.CancelFunc, error) { return NewProxiedPDClient(secretLister, fw, tc.GetNamespace(), tc.GetName(), tc.IsTLSClusterEnabled()) }
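
For reference, and not part of the patch itself: below is a minimal sketch of how a caller might wire the upstream PD HTTP client into the helpers this patch adds to pkg/pdapi. The endpoint address and store ID are placeholders, error handling is deliberately simple, and pd.WithTLSConfig(...) can be appended for TLS-enabled clusters as done in tests/e2e/util/proxiedpdclient.

package main

import (
	"context"
	"fmt"

	"github.com/pingcap/tidb-operator/pkg/pdapi"
	pd "github.com/tikv/pd/client/http"
)

func main() {
	ctx := context.Background()

	// Build the upstream PD HTTP client against one or more PD endpoints.
	pdClient := pd.NewClient([]string{"http://127.0.0.1:2379"}) // placeholder endpoint

	// Cluster health summaries via the pdutil helpers updated in this patch.
	fmt.Println(pdapi.IsPDStable(pdClient))
	fmt.Println(pdapi.IsTiKVStable(pdClient))

	// Begin leader eviction for a store, list the matching schedulers, then end it.
	storeID := uint64(1) // placeholder store ID
	if err := pdapi.BeginEvictLeader(ctx, pdClient, storeID); err != nil {
		fmt.Println("begin evict leader:", err)
		return
	}
	schedulers, err := pdapi.GetEvictLeaderSchedulersForStores(ctx, pdClient, storeID)
	if err != nil {
		fmt.Println("list evict leader schedulers:", err)
	} else {
		fmt.Println("evict leader schedulers:", schedulers)
	}
	if err := pdapi.EndEvictLeader(ctx, pdClient, storeID); err != nil {
		fmt.Println("end evict leader:", err)
	}
}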