From a53ac2460bb2caa4eb2299541729f7d7e43121b8 Mon Sep 17 00:00:00 2001
From: Martin Weindel
Date: Tue, 12 Jul 2022 14:22:26 +0200
Subject: [PATCH 1/7] routing policy for aws-route53

---
 docs/aws-route53/README.md                    | 119 ++++-
 examples/40-entry-weighted.yaml               |  39 ++
 examples/controller-registration.yaml         |   4 +-
 .../crds/dns.gardener.cloud_dnsentries.yaml   |  40 ++
 .../crds/dns.gardener.cloud_dnsproviders.yaml |   4 +-
 pkg/apis/dns/crds/zz_generated_crds.go        |  44 +-
 pkg/apis/dns/v1alpha1/dnsentry.go             |  15 +
 .../dns/v1alpha1/zz_generated.deepcopy.go     |  33 ++
 .../dns/clientset/versioned/clientset.go      |   4 +
 pkg/controller/provider/alicloud/state.go     |   7 +-
 pkg/controller/provider/aws/aliastarget.go    |  17 +-
 pkg/controller/provider/aws/execution.go      |  39 +-
 pkg/controller/provider/aws/handler.go        |  42 +-
 pkg/controller/provider/aws/routingpolicy.go  |  76 +++
 .../provider/azure-private/execution.go       |  15 +-
 .../provider/azure-private/handler.go         |  15 +-
 pkg/controller/provider/azure/execution.go    |  15 +-
 pkg/controller/provider/azure/handler.go      |  15 +-
 pkg/controller/provider/cloudflare/state.go   |   9 +-
 pkg/controller/provider/google/execution.go   |   9 +
 pkg/controller/provider/infoblox/state.go     |  33 +-
 pkg/controller/provider/netlify/state.go      |   9 +-
 .../provider/openstack/execution.go           |  11 +-
 pkg/controller/provider/openstack/handler.go  |   7 +
 .../provider/openstack/handler_test.go        |  40 +-
 pkg/dns/dnsset.go                             | 116 ++++-
 pkg/dns/mapping.go                            |  24 +-
 pkg/dns/mapping_test.go                       |   8 +-
 pkg/dns/provider/changemodel.go               | 150 +++---
 pkg/dns/provider/dedicatedrecord.go           |  30 +-
 pkg/dns/provider/entry.go                     |  54 ++-
 pkg/dns/provider/errors/errors.go             |   5 +-
 pkg/dns/provider/inmemory.go                  |   2 +-
 pkg/dns/provider/raw/execution.go             |  28 +-
 pkg/dns/provider/raw/records.go               |  13 +-
 pkg/dns/provider/state.go                     |  43 +-
 pkg/dns/provider/state_entry.go               |   6 +-
 pkg/dns/provider/state_provider.go            |   2 +-
 pkg/dns/provider/state_zone.go                |  10 +-
 pkg/dns/provider/zonecache.go                 |   8 +-
 pkg/dns/records.go                            |  25 +-
 pkg/dns/utils/target.go                       |  19 +-
 pkg/dns/utils/utils_dns.go                    |   6 +-
 pkg/dns/utils/utils_entry.go                  |  37 ++
 pkg/dns/utils/utils_lock.go                   |  12 +
 pkg/server/remote/common/remote.pb.go         | 450 +++++++++++-------
 pkg/server/remote/common/remote.proto         |   8 +
 pkg/server/remote/conversion/conversion.go    |  75 ++-
 .../remote/conversion/conversion_test.go      |  50 +-
 test/integration/testenv.go                   |   2 +-
 50 files changed, 1371 insertions(+), 473 deletions(-)
 create mode 100644 examples/40-entry-weighted.yaml
 create mode 100644 pkg/controller/provider/aws/routingpolicy.go

diff --git a/docs/aws-route53/README.md b/docs/aws-route53/README.md
index c433021d1..5640a1ee0 100644
--- a/docs/aws-route53/README.md
+++ b/docs/aws-route53/README.md
@@ -98,4 +98,121 @@ data:
 ```
 You may need to mount an additional volume as the AWS client expects environment variable with token path and volume mount with the token file.
-See Helm chart values `custom.volumes` and `custom.volumeMounts`.
\ No newline at end of file
+See Helm chart values `custom.volumes` and `custom.volumeMounts`.
+
+## Routing Policy
+
+The AWS Route53 provider currently supports only the `weighted` routing policy.
+
+### Weighted Routing Policy
+
+Each weighted record set is defined by a separate `DNSEntry`. This makes it possible to use different dns-controller-manager deployments
+acting on the same domain names. Every record set needs a `SetIdentifier`, which must be unique among all set identifiers used for the domain name.
+The weighted routing policy is supported for all record types, i.e. `A`, `AAAA`, `CNAME`, and `TXT`.
+All entries for the same domain name must have the same record type and TTL.
+
+#### Example for A/B testing
+
+Assume you want to perform A/B testing for a service using the domain name `my.service.example.com`,
+and you want 90% of the traffic to go to instance A and 10% to instance B.
+You can create these two `DNSEntries` using the same domain name, but different set identifiers:
+
+```yaml
+apiVersion: dns.gardener.cloud/v1alpha1
+kind: DNSEntry
+metadata:
+  annotations:
+    # If you are delegating the DNS management to Gardener Shoot DNS Service, uncomment the following line
+    #dns.gardener.cloud/class: garden
+  name: instance-a
+  namespace: default
+spec:
+  dnsName: "my.service.example.com"
+  ttl: 120
+  targets:
+  - instance-a.service.example.com
+  routingPolicy:
+    type: weighted
+    setIdentifier: instance-a
+    parameters:
+      weight: "90"
+```
+
+```yaml
+apiVersion: dns.gardener.cloud/v1alpha1
+kind: DNSEntry
+metadata:
+  annotations:
+    # If you are delegating the DNS management to Gardener Shoot DNS Service, uncomment the following line
+    #dns.gardener.cloud/class: garden
+  name: instance-b
+  namespace: default
+spec:
+  dnsName: "my.service.example.com"
+  ttl: 120
+  targets:
+  - instance-b.service.example.com
+  routingPolicy:
+    type: weighted
+    setIdentifier: instance-b
+    parameters:
+      weight: "10"
+```
+
+#### Example for a blue/green deployment
+
+You want to use a blue/green deployment for your service. Initially the `blue` deployment should be active.
+The blue and green deployments are located on different clusters, maybe even managed by different dns-controller-managers (seeds in the case of Gardener).
+
+On the blue cluster, create a `DNSEntry` with weight 1:
+
+```yaml
+apiVersion: dns.gardener.cloud/v1alpha1
+kind: DNSEntry
+metadata:
+  annotations:
+    # If you are delegating the DNS management to Gardener Shoot DNS Service, uncomment the following line
+    #dns.gardener.cloud/class: garden
+  name: blue
+  namespace: default
+spec:
+  dnsName: "ha.service.example.com"
+  ttl: 60
+  targets:
+  - 1.2.3.4
+  routingPolicy:
+    type: weighted
+    setIdentifier: blue
+    parameters:
+      weight: "1"
+```
+
+On the green cluster, create a `DNSEntry` with weight 0:
+
+```yaml
+apiVersion: dns.gardener.cloud/v1alpha1
+kind: DNSEntry
+metadata:
+  annotations:
+    # If you are delegating the DNS management to Gardener Shoot DNS Service, uncomment the following line
+    #dns.gardener.cloud/class: garden
+  name: green
+  namespace: default
+spec:
+  dnsName: "ha.service.example.com"
+  ttl: 60
+  targets:
+  - 6.7.8.9
+  routingPolicy:
+    type: weighted
+    setIdentifier: green
+    parameters:
+      weight: "0"
+```
+
+With this configuration, DNS resolution will only return the IP address of the `blue` deployment.
+
+To switch the service from `blue` to `green`, first change the weight of the `green` `DNSEntry` to `"1"` (see the example below).
+Wait for DNS propagation according to the TTL (here 60 seconds), then change the weight of the `blue` `DNSEntry` to `"0"`.
+After waiting for DNS propagation a second time, DNS resolution should only return the IP address of the `green` deployment.
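+
+For illustration, after this first switch step the `green` `DNSEntry` would look like this; compared to the entry above, only the `weight` parameter changes:
+
+```yaml
+apiVersion: dns.gardener.cloud/v1alpha1
+kind: DNSEntry
+metadata:
+  annotations:
+    # If you are delegating the DNS management to Gardener Shoot DNS Service, uncomment the following line
+    #dns.gardener.cloud/class: garden
+  name: green
+  namespace: default
+spec:
+  dnsName: "ha.service.example.com"
+  ttl: 60
+  targets:
+  - 6.7.8.9
+  routingPolicy:
+    type: weighted
+    setIdentifier: green
+    parameters:
+      weight: "1"
+```
+
+Weights are relative: Route53 serves each weighted member with a probability of its weight divided by the sum of all weights, so `90`/`10` in the A/B example above results in a 90%/10% traffic split.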
\ No newline at end of file diff --git a/examples/40-entry-weighted.yaml b/examples/40-entry-weighted.yaml new file mode 100644 index 000000000..6fe686faf --- /dev/null +++ b/examples/40-entry-weighted.yaml @@ -0,0 +1,39 @@ +apiVersion: dns.gardener.cloud/v1alpha1 +kind: DNSEntry +metadata: + annotations: + # If you are delegating the DNS management to Gardener Shoot DNS Service, uncomment the following line + #dns.gardener.cloud/class: garden + name: instance-a + namespace: default +spec: + dnsName: "my.service.example.com" + ttl: 120 + targets: + - instance-a.service.example.com + # routingPolicy is current only supported for AWS Route53 + routingPolicy: + type: weighted + setIdentifier: instance-a + parameters: + weight: "90" +--- +apiVersion: dns.gardener.cloud/v1alpha1 +kind: DNSEntry +metadata: + annotations: + # If you are delegating the DNS management to Gardener Shoot DNS Service, uncomment the following line + #dns.gardener.cloud/class: garden + name: instance-a + namespace: default +spec: + dnsName: "my.service.example.com" + ttl: 120 + targets: + - instance-b.service.example.com + # routingPolicy is current only supported for AWS Route53 + routingPolicy: + type: weighted + setIdentifier: instance-b + parameters: + weight: "10" \ No newline at end of file diff --git a/examples/controller-registration.yaml b/examples/controller-registration.yaml index 9c5aa3282..2e4107aa9 100644 --- a/examples/controller-registration.yaml +++ b/examples/controller-registration.yaml @@ -5,7 +5,7 @@ metadata: name: dns-external type: helm providerConfig: - chart: H4sIAAAAAAAAA+19a3PcRpKgP/evwFKzYXvO3XyIpL288MXRpGzzLFNckvLczMWFA2xUN7FCAz0AmhQ1M/fbL7Oq8K430JRsAzFjNYGsfNQjKysrq5K8z0ka+9E0iLPpyo/9JVmRON/9bMBnD56vj47ov/C0/6W/918e7h8cHRwf4/v9w4Ov9z/zjoZkQvZsstxPPe+zNElyFZzu+2/0IZL2n92TaBUu4yQlvWlgAx8fHkrbH5q91f7Hh/DK2xtAPu3zB2//F96Vn2MXyLw88ViDe4/3JPbuNmEUhPHSW/vzd9AtstnkhXd7H2ZetlmvkzSHH9BJIm8ZJXfeys/n9wD9lZeSyM/DBwLl8vvaez8OAEFMlvA1ib0v1ilZhO9J4D2GAPdvX868N3H05CUxLYkseWuSelEYk9lkdn7z600OvAGKs2S1AgS/nN14QZhmk9kyzHfpfxn7k9ndh3SX/rd4cb/cxf8Uf2YP8W6F6A7k26y9RRiRbPLnWfa4hv/e+e/gv/kKfv8/AP3FT8Nkk3kX56+A4DpN/ovM88ksDIi/y+Dg1WT2kM2TgOxOPnarmj+y8X9276f57MlfRf1p6Mb/wcvO+D/Y/3oc/8/x+OvwF5JmMCJPvIf9ib9eV3/uzfYPZt9MA/IwCUg2T8N1Tj+cej/C5ODNsYt4iyT18nviSTqSN09WaxjTcT6J/RU5kcFNHkRkP3bt/P4f2fgPkvlsmQxDQzP+D14eHbbG/9FLtP/G8b/9Z/fPE+/PMBOun2CyvM+9g739//BuTq+8m1cejGw/pn/4C5gdQz8ndDj78dPMO4WZnxbJYMbPSPpAghkzD3Ai9eDfKJyTOIMJfhMHhCmJU7Al4J+bZJE/+mBovGYgX3kPM+8AVMOcrHPPz7w4yaFcAkXSxzADbDEt/vri7NUlMIYUgG3k/LWUSImbKzTvYLbnfYEAO/zTzpf/HVE8JRswU56QqLcBYnkpBGcIqKPYUAHxnDBzJa8IzBDHXzmO5C73AdyHAmv4a1EH9PycM32f5+uT3d3Hx8eZT5mdJelyl9dXtsvFnALDvMDbGGwTrOi/b8IUhL178kBTQwH/DtiM/EfaVsuUwDc042LvMQVzCM2ujNc1ognCLE/Du03eqK+CPZC6DgA1Bq2/c3rjXdzseN+d3lzcfIVI/nJx++Obt7feX06vr08vby9e3Xhvrr2zN5fnF7cXby7hr++908u/ej9dXJ5/5ZEQGxFqEsw9kADYDLEmobMgrhtCGiwUk0m2JvNwEc5BtHi5AZ3kLROYIGJqjpJ0FWbYohm1KWkN7U4mu7vL5GRJYpJiP53NduF/92Da7RbvpvMkztMkikg6TckSRaWm6Cy791D7FaqQl6XTW7YrU5Ac6pdX1zcgNf+LvPdBOmhCGSk0qLziOb+8uUqTBzAi0xP/MZumCVT80cvme2jkKNkESL754cMmJZK36zR8QIHbX5dJsoygHhBh+1uyhhbIob5g4s3AUMZKrH+nhRaRLyAakzwKF0+d92G8SO6i5H3nQ0pWMMCxyWD1QRcX3JggMXbozKu3JF9t0L7BX2I3wG4yT9IU7HCvqm2vUduTdR37aE0IHln3zgl0ZKj+bABPkL3/5/jg+OvR//Mcj779f4VFPujcbJavHdeCGvvv5cHxUbP9D/aOx/Xf8zz/+Mfun72HcHXiZSSnVk/+tCbfrrBawDA5gan1X/+aINTk1Xuw/AKqeHEpV1g3VLfOJhxu6gVkEcZgZEl61gzL7njTCtrfRLk3Yw6HS0Q8+8WPNiSjkG9g2k9h0vD+6eXpJp57xy/pz3B1swGj9L23M62QEWAPfzN+z1KC84df0lhsoujJ+/sG5tQFWCBoQlFJZpO/EIadwudIA6XKvDsy99EkzBJg66fNHQhEYEww+QFHFIANAnZsFK7CnNle1Hr8Agw0rBuY9BAW5ys0ab6cTS4W6CAjfsYrEacusBgzPg
HSd2EORiaY13cE7dEADWKfMs+5Na5qLNOs7nBR1m7xsazhAkYKYNQEUVZh+hMV5+Rb81au8VnWDMMyu2bVxgqXvDbeWjMIhlKcL7ydf8+m/57ttLAxujb9Tva70R9rDY3Difs9sJFpY/Oew6Ai/45EFg1OS+1IxavXPvtdrI3+Cb0S9D0sb3b+24638+uO7XireAOiCSw+nuaRn2VcukYHzMgKXp/hQhKGzs7/+Nbbn+0fTveQQVgO3cFKMw+hY+B44/zNfgjzglXElIFqCjYRjKvZu2+yWZjsPuxXrSujsd+bxh3J/f1mNxKB+dH6voATd4equsIVNF6jfu797Ir6xr2d7N4/ODo+2fG+KJd+O3+a5f4SF2vFqx2PvvqS4/DqoNCqSRbmSfrULlH78q9//c9WMTEFrzuEXIid2BETVODHnjR/R4/e/ptHYAuAek4i4rgfoLP/9vYPWvbf/hG8Gu2/Z3im0+mkvgeQ3vnzmb/J70GHf2Cr6ErDvgvj4MQ7Yx3iGjrEZAUKMfBz/2Tiecy/D4M7jKHPBEZ2yQyHucfmuQyReB5uPM+ye+Z/McPHpz2OzEPDbvautNaQeXPeGnyJUIXoJonnDF3TYpCWYfiD6d1Ts9QNSR/COS2YbiICNTCF0uEPabJZ0+qYejs78E9KsmSTzgl/l7FiWeOPXZhPwLL9AC3J389TkuNvmAnveMklyem/UZixH5s1NB+hPx9xq7bLAFZUTN1tjB8Q6jFJ31UTnoBB+IjuPtKfOrTObOmnATp+ZtQH1aUGMGvuWso6L3ahtfJN+R6aOA1J+88WkB/HSU47v+hVCzh5jGt02V8tkPsEBkzwIYlhGozCeY1+90uraJTM3zX/qAMwR5o/h6rI5iTN0WeKSlv5sUJg1TT4a05NWMYOdGH4adRfyQPUc4tgDdVa3PTwZ9X3eF/r4p6DMkpWxUtqVYVFyxnKx1npMECb5KlLcp0EMLg2aOTW2rMAQk3AAZ10YY1pWBCYVTCslRbhcuWvJYzs8El8CkMizoHvnW7tfOyZaHw+xmNl/93B/I9639IM1Nh/+wcHLf/vwcs9AB/tv2d4+tl/37EOMZqBw5mBUKvXZIF1UOh9RaMAVNcm79EE2eYOA9uoHcoQc9ZO5/NkE+f9mhf/yNa+qM7oa29c2j/7Y6D/0yCbPuz3iAVU6/+vjw73O/s/h4dj/O+zPNzniJ7oL5qu0dOrCz4rZLMf/czbEdnjMCvsfFm68pkhfXZ9nuFQrk8rkrLFjEJt+Gtut56XNrxgXqnWbDPh0nCcQia404QVAIuCIqiSIqNhEWQJBS9hvQmvlmx2EdZjXCweihnm/PLmFVT8E32Fq6ifGq9fs3WV562jTepHJ821NhCHuSu/rJAW62/2EWyITeSnZSmkks2TNVRNOTsEbMlCexDDAUujIKAdxY+ucKsDPf7RZhWXNNhCtRa6+v1/nl/ixiVuzFXisOe/siS+8vP7E2+GVYj9C4nXIFjbQ9naO9wuPcG6RTtIQrVwRlBgCUW6Jp8VkLdNQEb49q9Xrywp+xnG8pCgYuGLchamXflLI3Y6rFxdv/nl4vzVtSU7tG290v8gJ4z/dCvg5vb09q1t5TOaVDXh1k4egvi5v1qL6RcaZ1YUuBXAM25Of+i2RlBnu83IYgG2FZ6MgBlnyVxz8hrowvBOcHr9w6tb20qgbikvDNgeI25T+0uPj08MFyxj33BgZE9gS64UI4Niuwg6vL35y+Wr64tzS96wRZClCKpGXSV51K2O29e1d8Xe44l3AJZFhw9UEctab24xgm44qCMlDx+Y6mwy8bc3l6/MuVDXBvS2DEPW2Mu7ItQNeiPwUgVItsZHl1GOp8Prz69ubpo914ZdhqPc4WQvcQt05Vc6F0MJ0Xp4edP6gLTgY4r2Rf0tXev8Up+rqqdROZ9XRgnfQM1odfB5Abo244XFptBNRYw7hY5Ox3ILdYJ2j5fQJc+MrnQADc5Umyjgs2cOGObJMg4/lLgzFuWBYbcwJ+ctnHQWwoH0gCYRPfVEA4NTglS8TVzD98CsJu9nPHOFwZInNDI4O9ndXYZ5YSXNk9VqA/bQEwsqxejcJM12A/JAol3Q7lM/nd+HOWDfpGQXKnJKWY+pE3S2Cl6UvsLPW7wKuyM+dMZXtgNO/rhp6/PiTJaquotue/3q5rZ0SNImabcBrf2qYFY1BFYb1AqNUsaI7DRZsZMmcbBOoJ5ZnEQUkrjdCLCMXYU521KGJsIWm3ln1IVPg2qo6zeYeRcxvF2R6AwtqG03A9Z2NsWqNW+Iuv3bLcDqrvGpsP3qj2zM4TPHAf06Sd5t1hfYc6EZu0Ctxo8oOOvpAE910tnl6c+vcCz6uYexY1jN2OzRA5tuLq4EWD2038p9ovYDaFd+TpX28aHgu1illxwz400rDA2rqkLCgmSF8ftx0+5rkhS0Ez58UtRS7DcVG7GSEhg1BFcWOmZKSGTkDqPSmLlU8BbG9wTmBs/P+bkANg4FaFW9DJ9Y0hodluqxhSV7QbnQEPV6w2op2GA+KFNemGtqSwwVQS9ibqbqjighzC1HbdszODrBpUHmfUHHMJ7QOC3efVme38ihV+KnrlVaPcW454c3SCCAAgW5knQQTU2xz36a+k9diYE7vbgoAhdMJtfHlCHXa966pUzVbtF2xmqjj1KV99Wp110rV9hE0xS1UW0mqsjP8rd03sYFmX6OaoBXsaQ4hssVYDGoEbi7KK2ecqu4/RS1iZ+niNZWUXMjXSuO26LAiIXkjh3e+4Efrela4HVJ3Sbjwo2glbPjrrCVpu4/0VJruGXYlFeeZm75hoyI0/rXUq18INZtZarZO56G4uCUgYSfiHYzao1+vRKX8lqmqE+gb9eQ6EGxQp3SyaezDG+UZkP2BEO0i06UwXIEVUnj3eauEShSQFLl6/3jX5P27u+QbvpaxNToqn9uV/1pWfn0fcNfX30TOe2bsW9Sx73PPtYd936d5lDe+7brsejP12Qxq3pux8sFn+neud75piKA1SlC/VPzvQPmll1fYm4ZUY6YaY3L0Lc/GtBgHkWfTikdtKft1wzhXZJAd4/FGG287Msu7po5NjokR4fk6JD8/Tgka9OPyDarTxlqf5ORsSrxoNT0qQi5ztUl1x3F07xO6eqivKOEr0h5LeAtHL28XaJxI+SBjR8J9Y/YEY0lNXYvXtbci0NVs51TETho+BQ71X3OTsuWh0ElSL2ue5L7OVDZFX145iaUzjEptL3qAC3bqUlWWN0q/1LbKm1+rY3X7XmgmPmjXTBexAE7ZsE2QyrGcXYqnC30Ah/mqUvSubiBq0tNpNXYtrGKx9StBNPNHH3+aHfwMwQb5gPCmriLyKq4DImAAcKdT8LNGq/mjxrXxtq1MTslNC6LNcti21XvG6zW7oKXvhatdcujW9JlbtJd5iacCF/h8sjrgZa30siazvuPsWzjGIsATH/FY8KbmN+i7sk6mDXBN+T9OuS6p+VtV3nZ2/UHVn8YvAXTohsp9At+0ldgg6nnjxr7ba9nh1nBjmtW3Zp1oFXqu
C5tPANYf3Rbp7DGqTIPq50DtPdqV9UxYxAbuYi5YA2OzVvEiHzBb845QcvlSwF1tSEI0xXV89J9vQbzb9ZsumLCcqMVmpkKUDBedErGYg2SHTeOnryHkJm2t//7lm9QO6yh6VAwWk6VXLPRU6/sBnfISBnXwr+QenXf3pPyoqKQqb2wXBPzU5AlAtDzQa0e+q0fFcFKHXH5lVJkG41iwKxuXSgOBKgQSyrqEwqeqkwIPTdF12Nbl1VX4hMeqFCmN+gRG/ZrEbHrblDFLvwo20JcgWoh3TUwK2TPuUweWFFaa0beZ7R8vip362H9EM45i2rKWv8gtZvNAtA2qzusgUXZyc2ErypAtvtdQEh4tHG1mhHTSobZHmQnZbqkpBpX8fkP6Wsob0IZ3Q3PvQt/VY9iargkii8ir0T9LhupY2Kddj0TtaCpbW2/t4alzbm0rR7y+gSOd/HuXcSQq093cZhZUahD/eLy7PXb81fnv56/+fn0wvrEYckMxi+pWaEQBozgIac6G+Mpp0/UCdNCPQYVjEEFf3DnDV/V396+1tr7hQMAYKuwT1yPlIvMBV26lQcE0HGNd4Dl0fARqnyWMGA6o5epZmDbzAs/7CajyUE4DnoldSGcH0WdiaF6cDAXpWjilSQjDLy8lprZvSJ/lG7tQ97TecZo8cM0B9tHrVbULIkbusY4j9BAlDsRO/goIprx0Xpn1JHNlELcV6hyY/iTkUq5yioszTO6Z2zuJ6lMzmqlV+Z6aW5As3qyZczz3k8r+3+65qmJppv4XQzL5Sm7s72xOKsedmOmJOineU6BXqqIVie9T48fgqynQas5oLiXQ9gKyxCUp2yxO9iROrpCgskFNDtMFTSDEU1SVMWPUI9mcRTQl/YqVkN9Y0scAmY4m3VriL3hsjzeh3OWlYmxKJWAuk6LU16sPhylUJ/BMzl/Qo+dcBdYdT8HV+KlRZytYWoNUZVLfSL8QpPKZSbfKZCIQ1XNIPMMMs1P0ZZgYqWZVRMsV36Ui6+8BPOACuercQIaJyDVJ3t3tYVp6IPR62MGEdpt64Ziy0b8aHZgySFvS4NxaDhczKJIX3Hgoo99Mn2pHCFmcpTek09NDuWcMx6R1XuU2Agpd5+EstSnY1tun+M0rdkx08YGm6s4ZmZBW+3IlpajrhHI8VvUNX/IfbTuVf3jhtpzb6j9SNvgb1D4qribv7Wx1oYQbbAJ0zFId9ruP7C9ocZWWwtF7b7K4eKB2Q7HXBhjiwJ6Z61Psjjb8eDluEcy7pGMeyRKA4zpMa2tV2nWcgskY5dklH7cdaWazcnjg2hu0Gw9w9SjkiW5kKNGqRpj2C+KBTr+bt3e2XyYxTxHLFvwCLJFeKK/A4fKw4FbohQ4GoFybCqSVqiLIc5M0MZUqGT5vILnPGZ4wo3eJ1e7UQ/VrILbguOZxztY4eyjqe5ZuGfhBczvQSnkMMxAu9TIfDSTv37vkFmdXdVL1GutEQInXm0/h0RY0xfnZrL8jcEKpQgI2AC4S8K0RON24OeUyPHgqUSZTcuxaKyPrV2Tc1mUaHNzpIyjZGODzpCMazotl2PH2s+APp8byrWFV+m1oJCBb0nYpKzKPgHf0qdwTtbUGfM8548Vo1U3udSnF/l4F8wvjZtBlTO5gbLQXRQn4OKqcV3cQHwwNWvIwcX5MGTVhzjourdsIimIJFVBE4iJJx4IJmHcIm3+h/Q+0dSPo8Np4APjr6FWuz4kfCvyGxW5OKWuoqjrKIoYhTGZSZvwmMzkN53MhJ09ouNt2LQi9Nq+MbdIycSYW+S34VEerz6onvHqg9+TZ7jK/SBaLwx2I19zqi9oltviRcwm1PEaBhyGHFI3YPNQu3C3XLnSeP5EIWgSGh91ZzMsO+sfgaWfpKW2WW2iPFxHxOSmMNmF4oU9oOWmukaCzaltK6J4evtkfo95EPCLyFQrvzWMj4r+MAfyP4Xhu0g2qPDj1npDKy4+izDN8u/9MCIBXZxh9iFtF/keC3kLWop2hShpXgddYu/bYZ8p7G6LbtAtKiSzhh+zY4zZMQbIjtFvMtN11eef2cYcGKrSQ3ltMQM2LBD+oMnnjfO/38EywzEJvCb/+97BwUEr//v+y6ODMf/7czz1/O+49HVOAt8jCzztWmMq+C3ui6i3Fqbe/7p5cynfLTDadGhvJ3SmgiYV6Q6Bdq+hs31gRUlExXhfQbhdYES+btDoNhEE2wNqGlo3v80WQns7oLUVIBKumRRSnYms2YiNXOaaiuxejzzQzYCi/QWjRq2W7bo9h/Z+gtAt3rXURERrmwSKDYbO3oGCokbKprO/58JLtE9gwtrgJwkq1dXY+qWvRXu/9aOdkt1fJmNj97ewk4t7CN6yawi+b95CINoc7pqtNZOVX1JZW49KtkVEfiHxdsjWguuHD61/5i0S0x0R4dqnvROyldj5LUTOP/vuiPlmiLCeRZsgwhVne/ND5jk1SIyuWnnLV92S/QbpyllyM64UXpIjVBKwp12wdwOwRMmWB8TedZK41LPMrdD1z4t7ScfLLusnau/vFj2/OneUYr9Q6FWVd0CtC9RtIMjcnlJGVN5LaSGh1/EjDp5hurfYcSfhR+piazlZBSOhW7T0zBXmWD1gox3RJo7o0PntDMMo3b0GY2ba7XkOlPlZhblZDZZ9kpysonyslthq+/eiHKwO2OrpVaV5V40WXo3EPYKkPQxJ/YJzO69A+5hvY6X/+8tILFmDFXfwj4uwT3kR5hynNq7KPlLM2vMs0xTpYs0iW+x3LJXpYZWBOMq0sAOkhFXuPstSwf5m0sAqpZNdhdkn9auWoPQGy37pXr22GdF87FK9KoRQ5luVp3cVpna13N2XpXOVpXLtuUgX56TpfWxSHnjZNc2KR7L63spxU+lRU0mnsFsACkr8hteEY0bWrSwHmxt2olymz7EUUuUtFeQs1W3ECRONGiQzlbnsuvlJh9zvddvpHXz996y5ec3WetVVXeNCb1zojQu9T3qhpzfheqQU7JhO1rlY5RafMgfrcPlXOyKYp/5ULVqleVh752AVLm565WUVYrTI1apc8JmeE3PLzypk3Shnq+MCT3xQSerykOxCD5ybVSqLPCfrIPlYO/WjyM/quO8qWwl387Fuf627ZUVprBklOVh75V9VOuCkeVcHybmqPmlBZ88hTuDJc6wOkF9VcRBP8umP7DQYU6tuy2/QO967G3S6tYDqIUOpJXlBTRKNKvOGGtWZMBOoPq2oIk/oGHXcdIJ8vITAYwTy6BNhz+gT+b34ROS5q7aV0tQx5FmcvmpLKUyd05eqlg6KrHFDZ4xTXK2quYJDFnRKscozxA2eHW4bEkgXJupUpNtJQ6pYJTmmH0W74nUI+k0pAEKB2QBgzLtED9ruMs2HEzDjGRonLs4y4fAujjd2l6QJvy1afJ2EajzcbdJMv5L+DqFwqCaPGSuS0WG9RtKf0xef408YXYQE0iSpVGxmBdAt+M+v+RRwRdJz/+nzrzD9Z4QmH45v7J9FZATMYqskAbxi32DK/SBthNaL+7RRXlsvTXI43Tab
kqNj/OGyXXwjhcfPcrIksndP1dSIZQLFNej2FxvRxhK8b0puOk6kmXZ7Z9lllSH0vlh0b/NwIousuqrUtI4hPJZxSINk0TXMoOsSWJcLTnb0zpgrSvOnzpYrZV14A/t2MuRuITvuaN/89uybnpsBhisE18y221sOWGayNeja+uBXp4ySW+3Nep6dskc+q33+DAckO4w+y4HJ/tlnhQavxBh/9uOZwoOT1hlme0ylJlllx3H/sXkedwXHRLGSMlvaHuymRZWkRNWE8X5i5yM/5dS2YzjtuHVUdlZpvY9bR7+drSNxttdBMr2qbDKjDK+9srtKrw72pJldXTxVsmyu28jkamv4arK3biFz6/BZW7djXmuztA6UoXU73Csysg6ajfUZlwmyqFiBYhFmXO3pHhNmWB0ku6p8XW2SVXUrGVWf8zKpT+I4q4mTYdhjv5KRo86IqsuG6pwJVTNY9RlQe2Q/1dBWZT21zXiqJKXKdKrJcqrNcCrNbqoNom5ryD+yt2RMbDpe4G1DSURlvMDbNepcknZz0FSf4/3af8BI92fLrDxe6za6J/kzuid/L+5Jec7C3re6bSfNqNTklxwbd0stKhVLnMFv6HSiUvLSzGvWKUSdHRGCi5UHTBm63SvI5elB26lB+54N/1jDSpFaTzpyTNJ+2qf8dO5gv/04pi0pCda4Hd4Vjb3VGCvR/s+zB0/J7ra3Sucp5VrlueuTwlNKUB8LJk/buY1JQ6VPnmcG0dblcLf6O6TilMj4CbsZ/9hJOD/io8//GZB1lDxRL6hT9k9N/s/9o73j/b1O/s+Xx2P+z+d4uv79dbb7ULjyz8u2FzjvTfzmuLRp+M6r80odRzk7ikShRs9/6flPCZ7F9jMGyFOs8pc02pFRKR12k0pvXxN2rJBieQixgX8MUe8+saOm3tGkHclCwzNe12p/0Mpzqb5CEXH+Wp4G4f3v83syf5dtVrvrbN3g84s1bp54f5rdcqSz74AYumO9HQSmCm7nS2D4n1527x8cHQMWZINnyS0z3NY3sWd0ckuvkjT/Mc/XlazUUAOG78mGSgrTuI/NsoMz3o4EaJ1gJ99BgobUZJios48w51B9fmWwUaONB25lt2FSdw5hsNHF1dlJ4wo2fHlJ8sckfdf9cHVx3nyZkfkG3d5nsFJppCxbZD8wZ/bx0dHLyjZLN/Gp4stlEl/DFNA6u00/vYU2aZbhSz5QqpUXu9zxnFZrJ16LlREYrqhNtFOvdvpup+wO9M9Gu9M3V6BpWQRWQ1PQb7N1+bHeSNIa8tiZ7as0fIDF9JK8yuZ+5HNnVetOvHktXXTr8FmatGz7qXf6+nXtzbogEHTx4qWNb+LoCSv9ewBifplW9fvpMqubrtMp1vK3TjMTe/hIB+NaPPxCehR5EdI9xiQvocpNkYyQgEPlT1/WUSN7VXFksrC+vZ1T7o0Os+olv0BSQhw+Smj+m3eRtw7ofuUto+SOnqaBcn7u4ehjlwjg6dFibGbYn+nBeerzpMtpHKD+nEZKzXb0ldKSl91vOWUnuafM9w2V8G2th5qgouoL+oeiOmkdNLemdmtUyi/sx4zfG3rBKw0QC6EbNesgnB5RRzOXfbCa7JEMozIjMZ4lDtSs4CTybcNcqGHAjxpJwA4DBLv432x37sP/ZvM0N+KPlplBgXdEV18+gDSJwAvXemF02eQ4Y6fX63OLmAUGPmXgpeoQGqW7suo0ICzrvb2YsVRw0zrTnJaurluvXrx4ge4X6D1L5i4jQVM/yRqqOcKRhTWU/Znf04E7Z62KKUCmqwpGrjI0CPUdqYUueEBdGHyHpvBN+KHTbAXA7A4hphmAKJhTYXNl7Wf//TVhkRcy3lb+e2hyCqNnTozPmjuwL1DtYkI3bR1yWOy0M7sKNaQyAPeKahayb1bnpnR6CHANY5NeE0RSdgWPiv20Ap7RW16MeFeRGIbzV+KZTso7nxltuReQGYb//1yru02d97+vzXqMHL01z+XCmaefukqSSDhSS8AZvxgCLIgkmmlGqQl6d54NmLVicljubki+WStYy/C7CVsdRNY8PWbXySYnRy/1Gvkxm6YM1lIhm9Hoz7pCHYt4N9PGhlTcudfq4hrvNqrYiMAgbMsUsYRxvR42JDII8yItLGFcrYQNkFsz/GGTEiNrCQEdTCUT/H2ZVozKLtdmY9KIgivf2vFYcm0zGg2QD8CwbCQKWdaPQyMCA7AtGoNCltUjUIvYiVV0cgLG8zgzHIZrVsBxOBqSG0gU3eAUymI+SE0J9pTGbMjWZbEduma0hhNDOZBlgpgNaFN6wwkjHd4yQfTD3ISOrQB3YRycBkFKsqy9T4cs4+epz75Pcc9MzqICky1T9E6C2zxqc0PfT/M8knMhKmpNHteVi8g3M0MqaHvlZ05pEBnkik8mhJHWs6DVSwydxmsJYaHujMkMxb9E1Skk0Oo5C1JDSSHQcQoJlArOkIQ158lqjVGR+lHMAS3Hrwn6vjwrRm2HabPxakTAme3CMWdgO1YCOHu+bckOKJZJw7i7xK0JDyCZVsMK5bJRtHY0hxVJpnS1Qul1ry3dYQUT6WGtUGp1bEPPWRhzP2wljqtD1pLocDKZaAlnT60t2f5imasINxeuHcVB5dHqB1ffri3VQaVSKgcXp68NNWdJqFvLyoJw8wZbERxKGhOV4OgmtiPZVyBzZeDiP7ahNqAkWjXg5li2ozigPEoFYO9xNqfUSwYLD3RLGmdXtAv9gWU01gzuPmonDgYS01JfODqvHYgPL5+ZFnH2ajsxMLyUet3i5O62Juwq2ZnM01wIYeJyVuBy5qtyh9kowF6+aGvSgwpnoPn6OantiQ8in7HK6+G9tqU7tGA6XdfLrW1Pe2jxVErO2d9tR9NVJE0YYymGRRCjAWpnbkOaE65xuzmeu5FyzeCneP5/Sq9ZoFuEUMSAfT0tZzHi7CzyM3mnocfdEMKATQGuHnydk8jvnEBp8BUghBFfHVw9+MJedE2yp3h+RdIwkasSHGS0k6YUerqm4Eb8qmj0ZF09tgqWDceVGKUzh+nT9SaWM5c+TdNNbMBWB48rRz8kyTIiVPEFNsbNkpZjO4zO1o0F8WHlM7BvpAJaGTg25IcR0djEaQvoYOOYUx5cNp2Vo5LO2MyxoT64hCpDRyWdkaVjStVVqgvpcdtShuY5Yxd0zszFi+QuSt5breZCXsh9LWdKdkCxDPScWC4rJWdMeADJjNVbQy4H3WZIc1iRdFpNKpSxSjOmO6xgKmUmFcpIkxnRcxUGL4NmiU3O8MIWjTmMtxdO2T2JU3rBi7FBrKXjKsAlyaNw8WSl6WJWxl3RGRIdTiYDNScUykrLmZLtL5axjqsL5aDizCgOKo9OwckkMtZvplQHlUql3WQSGSk3E2qukrxZE7xTZf7unOAln4DfXEUkRdlpUBR2UxWWTAwvq4HqUAprpUJs2RhOXGOVIhLWQbXYcbAVOXWqRiepscqx5WIr0qpUkE5SI1VkQ91ZQrzbOQz0XryEA9q48lTIXRk29pS6eEm34SLV1qxFhQ5akY1EllL
mONSUZn4w4FCGtS+b5i7ygmU3R7kRvUGEUXeKphCG3UOF3pVn43nMYc7axgxlMR85zT3bmWkM5xXrOWT4GeOaXhlmbriyK8bcbFUDWsOIYWCTtuWwMkNNqPWTxHyYMjlcRquW0GAiaEduVwjzAWxAbDBBlIO5K4TZmNYQcWcebwcONhFRb5OnJZzpbrkCsyuzN/RGRP1Eym5OtJpGFajduRVc/lTjUXn1kxSJMzd4kVSWh3N97RWQVvWnQu/KsypI0Sg8caDIRAyYoam+QXtrq+9DDdimBnVE7JnHv7ps4lsVN+1SDnT5ndYCLVh+UnIgKm/NxnoD9vQijLotVX5RMCEsbctDUIWLqVYxjUA0w6WLAeoe3Io6uEW4nAKTNVMsZK288xaEBSNqjt86DPLIuDKxBJhqJbCCWTMKjozr4vpcwvkMUFtzKwnfM4jaExV1IC80PwyC80RFHcjf+umS5NI6yOlng6qQ4HHgSKs1LKMCNSgdORRqCrPgPwkGB0Zo5qubIsm8kcrlhaYsuevMSQXbEh5MMkmlyyXSNoQZnZ4CyHVMk3MjnaNB3JtVtULoMGysIIyo9OP+1Xt6Ifw5zR6r5Z4w6ClLNmvMvJxIP+Z/6mYLaHOM2QIMuWxh68earVJxVybb0yKm6sNabQyvL/jYwMRVSUozUYnyOLQZLwYiKzal2Y2qHAqDkB1aLFrISa4HLOkuWIfwEJJhbV2lZBG+NxQJG2e6piXsRBFTGkqGWhI6IxEovL0AHTJD8E83YS+0CopzT7d5lbl4DGkMxvobmmfUin2WmtRBhC6tIcS4Jn600s69XIKUAtvx3iUwBNs3JL9YxklKaNWY8p/B/0NajDWGpSgKokPIpFqnCKQxXbuY0XEQoNgGvmapHfG1+aqm3LtPq9Kuqxs3RgaXWNJ0ekm1TWhHdyDB5KsgsURGqyFDQoOJoF4dSQUxXiVZUR1GKtfB1X9QbX802Q4j5+GzvXFjvBoQCuS2KnBiY1viylYJ5vLqVwtujAwpsXL1oBLVdBVhSXlo2WSrCp1o+tWFHdkh5ZLauiqhDGxeC4JDiqOyFxUSmdqNdnRtBWMRMVd071Kw3cQ+T9fFdzmnCkTWPCn9kcb+RzkaW4YWCRjtZ2nwdh34eaeZ6VdQncF0Q7/LOZLjseVoWR0UNrl/v9e1ABa0hhFDHmjX7/S/DbV+kugC7Xoc8jcnNJgIkkC7Xmf5bYgNJogg0M75yL4pEWvmU5h5xSsK+km7ZpAgsOWjOP5rol7cD+GbUhmAe7lW6XHW3phODwF0ysT1SL0hiWE4l+gQ95PzxmSG4V+gOtwOyBuht+d5iTmFAKeh548XoKy7ePusCA4ijVjvSKTQKR0D/D2YlnjsGtzqvXRqhL3YU3jj2kyaeeBMsLtzrI5PqHNsvDYwQe7OsCAmoc6lMh5BisWdHRtl4KYEtjP6TYa91XAfdpybeRjrzDp4FW3IDSmK0HuolUXjMbQi2FcauWdQIIaRN9CMwhB8C71+ErY1nj4j9H15lsQPCDjWxw4Y4B6EXXHMgJRlXbyAIY2+rIt9pwKudf5SPeK+rGpiAwQ8W8QFWBDrK4fUv9uVwHI+GtCP+65rcijNjHf9jIt3mzvC3nSoll8UxIWl3Xng9xSfk3WUPJ2lQaevVaDlfcgBBUa3raKXGZJw57yrM2usqrSkDIU7Kz+HS/buQll/qwIMlLhRzcnw2nIaET8j5/yvNoP04zTgX+VsSZE4cSOy9xgnaqNOWNiJg2sSk8dz4gdRGEtYSREEujuD0TAlxefIHYuUwnv0ZMwxiCle0qflTYzNkbU8fRIvhwrOMNJLtwRS4bLmy+RWQ/vLDLVo7flcviYPpHPUFt5D3cEHFS/dorbkcZ2ew/+7d9hWX+QciEvb8hDLbHQDg1xY1pkBGAx+dDqfg0HxJo46NkB1tjFCwKlPIacJgBpwqEBuzbD5jZfOF10a0ujPunwrwf0+S1Mq7tzr9hEcr600IzAI25JNBOfbKU2JDMK8YAfB6RJKE+S2DCeA5DVOZW0O8QObEOUMCQtbc2B/7WXv2y4taQ4nklx/9L/U0pZqf6l0eqXn3ZV2BAcVR6Jvel9RaUt0UKEEeqjXTZQ2xKwF0dw8aXPhpAqXLV/raLMM4+8Fl4WwL1P1bSHi4tZMaLZ1bLZyVLhc+BI1lkEbiYpak1fdWWl6VaUUiSs3Uk4MmBiGAa2TrLpO0spHZkbAme2uh6ziU+UgkyBw5kPlHqs4MvSOabE6s6nb63W7+NQIfS+ehQrD5n5TJTZb1lKNSWNhvqhQ9WBLYppYmSFqdD2YE5gYxuaEHI01Q2aXkLrcPWqAuh+38hWC0xWjJsjdGNYZ//Y3ierx9uZUNngcLgw1wd2bX9F4srwXVIfTjUfmTpyTFLOAzQF5ponYkxWzCd9zID2UdNZi9RBnGDmUt7WaX9KqQGTLU6a+lNXiLlYFJnum0geSXiVp/mOedy5hZV9hAZPm03v4rmJJhseFo3BOzEN6eQHnkF4rgoNII25+iRS6rmCAvwfTkpDeBrf6kF41wl7sKUJ620yahfSaYHfnWB3SW+fYOKTXBLk7w4KQ3jqXylgbKRZ3dmyUgZsS2M7oNxn2VsN92HFuFtJbZ9YhpNeG3JCi0ALWsjxgKTdhOgT7SiMP6RWIYRTSa0ZhCL6F4QIStjURBEbo+/IsCekVcKwP6TXAPQi74pBeKcu6kF5DGn1ZF4f0CrjWhfTqEfdlVRPSK+DZIqTXglhfORSrj7YElvPRgCG9mSgfhCYNRKeMNVFd2gerbA9KbLassfZo88PeylnolnKjqzMKHAwBPeL+rAonfZeJ3gC1G7fySd1mIlfhcudLOGmbT9QKRG48SSZl44lYiqUHO+JJ12qiVWJzY008qRpOpDIUbqxoJk37iVKL1o1P7QYxA7PbHTZA7catbBQoN4VFRd3Iq7aDOSOGe8FqfG7cSe0bC5tGgcmaqW5eJmU6prxfGqYHGA5h94wIfy0nKyhnS/qDQeYn24RPOpwCHl+8eEFfLQmoBj8ngddA2Sia+vGSeH8Kg/dfeX9aRP7SO/m25MQPghBL+NFZHUFTJOCcFdyKAx697tlJjRwUZBH8CH3iOfjiAWma5Mk8iU6827MrGYckfqiXgj9PGqB58ld/FTXA/+nFYRyQOPe+0dRGFD5A42TZVZrckZMaa7jB8APJ66+AXz+/P/F272FKyu8/ND+514Ln4a7Oipx4P97eXtU+hDG0uh/RvZ4bPMcVZCfey70aRB6uSLLJy49HKlmLUz5ZXShBDZZwtXrc3xP0oSStpN1kebKaPSTRZkV+TjZxntXwVVtrxVZuHVu9UJu1ek8VkGjUo0AUURmZUMI663ChEwaHxgoJXbG+gnuJ2e7cb7Q3Gq0nHv00bXySjNfGK03lm9Y7hz6RDTudoFO5FGxHsDl02LtLWgRGSRjTzQFvh7zPSQqKjeZrWvmxv4SREOezxSaKkMCONwOqja3dOjEDBdesnBagqr9kxnqk84J7Ik7nc+wKrlJPhAIt/TTACQV0CgnAzo
JyYV7b0FinYZLCG7pPxEgXRabZU5aT1fQ/9vZknOOLxzC/L8nFSUBuSATLgaR2WKv+VqyOxZXXoteh5i8WqPVq4hRvzKnoaORJRJharnWH2ksnSp99pEfSk3ahmdcRBjDswodkWlrBsyeQxpLGHjzHh4f0X3ja/+4d7x1/tv/ycP/g6OD4GN8fHOzvff2Zt7cViVsPjFg/9bzP0iTJVXC677/RhysHH3rhF3GSS8wPpiUWIUm/1OmRzmf2o6Fo/HX4C7PSTzz00Zaw9ALH3Yd9P1rf+/uTdzBgTrzzyxu6OJ6sSO4Hfu7jAGOTB+uWZMoQTAsa/Dt11zCj6prQ40+zhhcnW5M54kqYJ+XEo5K0mKmZZAqRhGACBevPc7AYT7w83ZBJNfw/Xvvrx/99Ajo/aCx3LHWAbvwf7h20xv/LveP9cfw/x1NbOxb9lzU45hm94g2OfXc6ndqP2x+bqJ4EQxgHTswd5sWAzBqWQX0WLb7UZtNjNrJo53zqlmDv2/CfwMz7aTz68b/O1i6Tfu3RjH8Y9IfN8b//9dfHR+P4f46nPqTZUIFhfAejtBjFV0lwQ+YbXBGoxrD90oTuV3Fj+Z5Eq1l2vzu/97kbRIuPgtbWOf56Ta+rSWOCIbthsmvOW2vJ1EUV4tnIWGRMyMsw/MH07qlZ6oat7Qot1FyKZ7yqZ4DPT1dJGYjeAYAfmEO+Eajux2DCFYuQ1rJPirkSgL6uALEXtGTyoyh5JAHLD8CSZ594n6ewRg1XZJdHen5ujq9IGV7hO/EE+GraWiaVqDqo1wDf1oTHCcpIqCCZvyNpwcNXQhmN0LvJyH8WMyLn8cxf+3dhFOYh8vh//i984QhOA9FHWMs/AFHohifewo8yUqC6Kj68yuZ+RLtMBZGSv2/ClATnabJuIJ2g2+b09etJw/0z5c4Z+ImmwyXJH5P0XYUOX15cnTVfXF2c1whu4tPsbUb4hJ9uIqijn0EzX+MH9g5tFK4tpmD3A7vHR0cvD/lae+W/r15k5HUYb97XkVFEpzGuDLLNeh3Rce9HP6TJZp0NQ3WRUWzDIEuJH+BlLdcw6+AJY+Z44euGIfW/fv5Pk4j0MwA08//+8cuXLft/b39/XP8/y9M269M7fz7zN/l9koYf2OL/3TdUjz0UFsE1dIihjACTZfpoKkBBVChQA1MoHVZaa+rt7ExaOzNTvku48tdZ7Rub1yZ8p8/Wf467daAIIugmqPxJesdxLanen3osaY4DfzVcNJJJgGOeJGkQxvXe2EVKa2w4hHWMz1SFj3gAtVaZzzL+zfT/HQz8MF46TgM6/X9wtNfS//uHh+P671keR/3/HesQ4zTw3NMA1P01WdAlAddpijYDqNqU3aOFsg0N6KMTEMN409ih3H7rf+xx8nt99PqfLe4aO+iW84BO/788bNv/L/f3D0f9/xyPYdRGe5ooJ4MbtvQfYh4QBGmYaAa8A+rEe7P2/w5L44KHuT+btwKqGqLRqJMZQsEPgV9JDFxet85+G6DnJboOrPpeq6Aoi/eaVVEvH6cRGBt9GiKPMpOW4PIWrcHKGVQxLyeu5nGDR/+Y6H92mMp9AtDa/wed/d+D45ej/n+Ox1D/S9QO7RkNvaPCx3vSrLZNIdi3aG/gmmKqdngPGdZGHNcg+tAyroQpRJkCK2RAKGCe+/G9nbMIuiRJL652io3tMnK6iJnkRYuAvXbocIMMfq0H9zUjpps77R9xRfQpxML8ER9j/e+zld4W4v/294/b/p+D/ePjUf8/x6OwKRsL/NHR81FCA6p97nprnConUMDYmjvlpf/p1edMwTKFdMMaiwNWO3SC8jd5Qg8sNDvMbfKOlFvKozH+yT56/f+w9rcb/7W///KoE/91OMZ/PsvT0jPY2BKzfwdHOkaL4EZQuSeww6eLX/DOQ/h4lQSnHE4Yse1ggQNPloZ3sWF3VYsJrZ03ZEGtxTGN2lFEFpzz+Z8/nxQnOFZhfMpCb+rHkebrTcPgxlqrIGfwuXmkawUGefqkKsIguOuDn1tfFEZ51QYwlWS4EYPveZAtPYyN1UZf9pia2aZjvcrYm5+TgHRYrwPPKrjR8/Jbe2T6/4G1dT/Fzx+1/j842u/E/x4fHo/+/2d5UAW8AYsuDXGU7+xMCr1QvaRXYpbBC7yTpJNJCqoHVP4Zmnsn3v5kEq7gA9O/6yQLc6rzyGa2nKc4WZSnCNdpgruJuxLEqAGXsBLZm+0fzL5GDwiwxPWSd7G4TPIrUPCo8SZ4mBsjHSeN2A2MXyQZd5pQXb2/t7ea1BTx8eHPIa4z8BLlGthBC2z/4BuAm4DCQxg+L/IgvO7cQFEcMQwliqM9SkmtXHdwytyZTBqnIr1//GtSP1WIcpaHGfHjhIW3nF2fZ4WhPXkhNfVPJi/4OuEdedo/YT/pKN+vfzmofzkAhC+8ZM3uDoiePHZZiLdOSUCAEzASRIc8XnRPkFDqhfuKxZlTsnUP1As2WbIrQnmszYtijmZvZ8litnqaLcI0y+mNCrN5spIAZfRQO4NK0mUB9aKRFKSk8GLq+Y/ZNE02OTl6Wb7F0hfnDbAP+wcvD4/Yi+LQCf+K0JikkZz583tye/sautQ9tFP9LB22/gsPnXPrHC9xLxIZQhu+vfz59PL0h1fnDKR9L/1J43V1/Tt/D5zgKRxokM6N9lIIBZL21e0qAB51rQL5z3VBouySrTt+259l72/wcjT+8jG7Zg0mE7kD0JG4hJAILPrelFcEUYn7YZMSRZu0PnfY498lzHW/Nlnrfm8yhmHg8E2Ua1EJJWSzAlNwKwTqMi0EK3nHcLTTIMD823gdBns5p2Muj1Bt77E32A0Xka9qACFMW7wGkFg4GUhDNBlQKRieJgC9HciYbX3u8Fl8L4agtF0VkAZIJVWgBGxWhBK0Wx26cS4HlEqjGfdKOLEsSj1QQtEhqW+XFphUDA6nEaILJRahCycWQKs3NNBKcXR6RA8sF06tVwrQs64qKb5UI1gruxBWJnkDWC23DFQotQy4I3N7NvYOmp/Z3W9/q1s4YbyszvDUQPmt+GDXMpMf7PydNgS9lAgN86z9pX1lOV5ZJARibO43P6VP15tYxNUPSbKMCK2OQNt0YmBZ2zWh1Y0nhRW2nhS603wX5Q0NJyzmvvYpXiR3UfJe32MFkDKZa6BqgcWAQmnFoB1RX4tSoTdBLuU5pHWAMoEvpQmSDeCE4gohO9K+0ae3NS0gE61bQi2iEl4oqrJEV+RWjs/2IO9oiM7Xbq1cNVd9ok8GmueqnZ6urSbVFaerJlWlXIuTlamAZO19LczNpYERMyzIR9WGaKQcojNqs05vmvl/2jXKlnswUTTftq+kblIVzt1/E9zAWBTDBTqNG60O6ZzgSVn2eb1Zs9O7pW4NxMl1TtofKxr8vtVyvwKKQdPMmydvSzDVPBtI59egM68GjawvFfuqaTboTq/wCqaX9OmGOtkkmYUaGMTg9QZuAamEasJJBKoBNVPHnHhLOo1SX1EX+CeMMhWikbRuB6bWyI1v4sQgEmKSxBtK6OqibC0Y3zpTQL0pb
kHSwbDLneUCsxuXlXhatx63encHvKMcAKhw410zL3TLoSRsNnWRRiuKQIseqoap904FpA2TJtxJ+ppVCdbhtEVqvc4IlnU9LSjvNlq4lu5mXnl2uwIqSvZSrQAWCXStszR4S33w9d63rAxuuc9IDNSebZtQ4tlWCtOYbaVQ5Wy7TKGKCy3MZr2wMKalYggg2jLUQMQCiAEa3ItBStahzdCPJ09YV00rQtC6WqgBSKeTBoxoKqkANNNIBVifQqq34iHe/F5vjeK9ZtpQATanjDakYLoQgTSnijZEe5oQfq9PEW2A1vTQ/qyYGtqgnWnhXa0dMOKO23XdV52b7OtkKrCamNXL+h3v/DMNRTkvNnjK/kpfV2qY/nlNYvJ4TvwAlFXzPdvAxMVt4zVMhfXuE8kXv1GyfE0eCFi+OOToq1W5vcRAakE03EhtvgbqfnRKA8fx9pF6rcS6JXUXoK1NYs0SWvi9oUuEEKUqSeDP11hrdb4T0/WzHLAtRxdSLI8SriGXErKSr70upm/X0WYZxnhJDP9bqj/XreHS3AktenqZgr37RjlsSqjaqCnfCQZN7atc5XfywRcrklRY46mkflNBbUqyf4s+tjuAOLm15FuTEemC2TAfshq4DSVbe7dy7iJ484b5E++bvW/2OKwmo2zVWtL8rUWP66ZO7U7QspSlZbeRpQjtTNCNjJ2C4uIJupuVsv1eM0FrkylKIQUTtDRLoBSiPUEr8t+JBWtN0JqEbnUVIE2gVrV/3ZnTySvG+GHxjyV9cX4txedmHbdTSgk+NOszF9ZiJ7lRnddWjQkz+9TrSZJOpwvSYUCoR1s5XpCzvO4A4wH7jGNRHpKTyUSSIoRFVxWRW4jhoYwHRuLsC773vDk/uMYvPC6ujae6pYiRCsuPtLhXKnj6YlJcpIfArXsKq8ppXvVXhj6xu/dpZNQL7ynZgPHz5D36ce7lCUup4PmxV8nJr7DD6OfTv9z8+vbm1a9n16/OX13eXpy+vvn17MfTi0svzLDPTl5w5BjoU95896J+JC/H8w5FHFCRwOBFEQDMjS0W7XUIzVJ+qec1WD1NSzTNpBZyUvVcEezDpD45YEWSZkW9KNMLcLx+K0iMxjkF3et950wL81YtorW6cPAqZpcoYqhWkszu/LQep/UCZ6x2ofUmXSdoufGzMUpgVmmsNjrn1fFgJC3Mjl++TnyY3COcx1N8T8+Ncznnfikwvgbgm718NpvBMDk79TYZCdBVAB07xCMytGTJF11rVPAJmMtgnpIAykCPkk3QNQcz9i3eNEUkHM7AVRPUu4ZISlwRLYr64qW9GjEkgDLIRGyx3CqJFxS02qIhsi0Cdkr1a3jGsPDxGZ/xGZ/xGZ/xGZ/xGZ/xGZ/xGZ/xGZ/xGZ/xGZ/xGZ/xGZ/xGZ/xGZ/xGZ/xGZ/xGZ/xGZ/xGZ/xGZ/xGR/H5/8DqXO9PwCAAgA= + chart: H4sIAAAAAAAAA+19a3PcRpKgP/evwFKzYXvO3XyIpL288MXRpGzzLFNckvLczMWFA2xUN7FCAz0AmhQ1M/fbL7Oq8K430JRsAzFjNYGsfNQjKysrq5K8z0ka+9E0iLPpyo/9JVmRON/9bMBnD56vj47ov/C0/6W/918e7h8cHRwf4/v9w4Ov9z/zjoZkQvZsstxPPe+zNElyFZzu+2/0IZL2n92TaBUu4yQlvWlgAx8fHkrbH5q91f7Hh/DK2xtAPu3zB2//F96Vn2MXyLw88ViDe4/3JPbuNmEUhPHSW/vzd9AtstnkhXd7H2ZetlmvkzSHH9BJIm8ZJXfeys/n9wD9lZeSyM/DBwLl8vvaez8OAEFMlvA1ib0v1ilZhO9J4D2GAPdvX868N3H05CUxLYkseWuSelEYk9lkdn7z600OvAGKs2S1AgS/nN14QZhmk9kyzHfpfxn7k9ndh3SX/rd4cb/cxf8Uf2YP8W6F6A7k26y9RRiRbPLnWfa4hv/e+e/gv/kKfv8/AP3FT8Nkk3kX56+A4DpN/ovM88ksDIi/y+Dg1WT2kM2TgOxOPnarmj+y8X9276f57MlfRf1p6Mb/wcvO+D/Y/3oc/8/x+OvwF5JmMCJPvIf9ib9eV3/uzfYPZt9MA/IwCUg2T8N1Tj+cej/C5ODNsYt4iyT18nviSTqSN09WaxjTcT6J/RU5kcFNHkRkP3bt/P4f2fgPkvlsmQxDQzP+D14eHbbG/9FLtP/G8b/9Z/fPE+/PMBOun2CyvM+9g739//BuTq+8m1cejGw/pn/4C5gdQz8ndDj78dPMO4WZnxbJYMbPSPpAghkzD3Ai9eDfKJyTOIMJfhMHhCmJU7Al4J+bZJE/+mBovGYgX3kPM+8AVMOcrHPPz7w4yaFcAkXSxzADbDEt/vri7NUlMIYUgG3k/LWUSImbKzTvYLbnfYEAO/zTzpf/HVE8JRswU56QqLcBYnkpBGcIqKPYUAHxnDBzJa8IzBDHXzmO5C73AdyHAmv4a1EH9PycM32f5+uT3d3Hx8eZT5mdJelyl9dXtsvFnALDvMDbGGwTrOi/b8IUhL178kBTQwH/DtiM/EfaVsuUwDc042LvMQVzCM2ujNc1ognCLE/Du03eqK+CPZC6DgA1Bq2/c3rjXdzseN+d3lzcfIVI/nJx++Obt7feX06vr08vby9e3Xhvrr2zN5fnF7cXby7hr++908u/ej9dXJ5/5ZEQGxFqEsw9kADYDLEmobMgrhtCGiwUk0m2JvNwEc5BtHi5AZ3kLROYIGJqjpJ0FWbYohm1KWkN7U4mu7vL5GRJYpJiP53NduF/92Da7RbvpvMkztMkikg6TckSRaWm6Cy791D7FaqQl6XTW7YrU5Ac6pdX1zcgNf+LvPdBOmhCGSk0qLziOb+8uUqTBzAi0xP/MZumCVT80cvme2jkKNkESL754cMmJZK36zR8QIHbX5dJsoygHhBh+1uyhhbIob5g4s3AUMZKrH+nhRaRLyAakzwKF0+d92G8SO6i5H3nQ0pWMMCxyWD1QRcX3JggMXbozKu3JF9t0L7BX2I3wG4yT9IU7HCvqm2vUduTdR37aE0IHln3zgl0ZKj+bABPkL3/5/jg+OvR//Mcj779f4VFPujcbJavHdeCGvvv5cHxUbP9D/aOx/Xf8zz/+Mfun72HcHXiZSSnVk/+tCbfrrBawDA5gan1X/+aINTk1Xuw/AKqeHEpV1g3VLfOJhxu6gVkEcZgZEl61gzL7njTCtrfRLk3Yw6HS0Q8+8WPNiSjkG9g2k9h0vD+6eXpJp57xy/pz3B1swGj9L23M62QEWAPfzN+z1KC84df0lhsoujJ+/sG5tQFWCBoQlFJZpO/EIadwudIA6XKvDsy99EkzBJg66fNHQhEYEww+QFHFIANAnZsFK7CnNle1Hr8Agw0rBuY9BAW5ys0ab6cTS4W6CAjfsYrEacusBgzPgHSd2EORiaY13cE7dEADWKfMs+5Na5qLNOs7nBR1m7xsazhAkYKYNQEUVZh+hMV5+Rb81au8VnWDMMyu2bVxgqXvDbeWjMIhlKcL7ydf8+m/57ttL
Axujb9Tva70R9rDY3Difs9sJFpY/Oew6Ai/45EFg1OS+1IxavXPvtdrI3+Cb0S9D0sb3b+24638+uO7XireAOiCSw+nuaRn2VcukYHzMgKXp/hQhKGzs7/+Nbbn+0fTveQQVgO3cFKMw+hY+B44/zNfgjzglXElIFqCjYRjKvZu2+yWZjsPuxXrSujsd+bxh3J/f1mNxKB+dH6voATd4equsIVNF6jfu797Ir6xr2d7N4/ODo+2fG+KJd+O3+a5f4SF2vFqx2PvvqS4/DqoNCqSRbmSfrULlH78q9//c9WMTEFrzuEXIid2BETVODHnjR/R4/e/ptHYAuAek4i4rgfoLP/9vYPWvbf/hG8Gu2/Z3im0+mkvgeQ3vnzmb/J70GHf2Cr6ErDvgvj4MQ7Yx3iGjrEZAUKMfBz/2Tiecy/D4M7jKHPBEZ2yQyHucfmuQyReB5uPM+ye+Z/McPHpz2OzEPDbvautNaQeXPeGnyJUIXoJonnDF3TYpCWYfiD6d1Ts9QNSR/COS2YbiICNTCF0uEPabJZ0+qYejs78E9KsmSTzgl/l7FiWeOPXZhPwLL9AC3J389TkuNvmAnveMklyem/UZixH5s1NB+hPx9xq7bLAFZUTN1tjB8Q6jFJ31UTnoBB+IjuPtKfOrTObOmnATp+ZtQH1aUGMGvuWso6L3ahtfJN+R6aOA1J+88WkB/HSU47v+hVCzh5jGt02V8tkPsEBkzwIYlhGozCeY1+90uraJTM3zX/qAMwR5o/h6rI5iTN0WeKSlv5sUJg1TT4a05NWMYOdGH4adRfyQPUc4tgDdVa3PTwZ9X3eF/r4p6DMkpWxUtqVYVFyxnKx1npMECb5KlLcp0EMLg2aOTW2rMAQk3AAZ10YY1pWBCYVTCslRbhcuWvJYzs8El8CkMizoHvnW7tfOyZaHw+xmNl/93B/I9639IM1Nh/+wcHLf/vwcs9AB/tv2d4+tl/37EOMZqBw5mBUKvXZIF1UOh9RaMAVNcm79EE2eYOA9uoHcoQc9ZO5/NkE+f9mhf/yNa+qM7oa29c2j/7Y6D/0yCbPuz3iAVU6/+vjw73O/s/h4dj/O+zPNzniJ7oL5qu0dOrCz4rZLMf/czbEdnjMCvsfFm68pkhfXZ9nuFQrk8rkrLFjEJt+Gtut56XNrxgXqnWbDPh0nCcQia404QVAIuCIqiSIqNhEWQJBS9hvQmvlmx2EdZjXCweihnm/PLmFVT8E32Fq6ifGq9fs3WV562jTepHJ821NhCHuSu/rJAW62/2EWyITeSnZSmkks2TNVRNOTsEbMlCexDDAUujIKAdxY+ucKsDPf7RZhWXNNhCtRa6+v1/nl/ixiVuzFXisOe/siS+8vP7E2+GVYj9C4nXIFjbQ9naO9wuPcG6RTtIQrVwRlBgCUW6Jp8VkLdNQEb49q9Xrywp+xnG8pCgYuGLchamXflLI3Y6rFxdv/nl4vzVtSU7tG290v8gJ4z/dCvg5vb09q1t5TOaVDXh1k4egvi5v1qL6RcaZ1YUuBXAM25Of+i2RlBnu83IYgG2FZ6MgBlnyVxz8hrowvBOcHr9w6tb20qgbikvDNgeI25T+0uPj08MFyxj33BgZE9gS64UI4Niuwg6vL35y+Wr64tzS96wRZClCKpGXSV51K2O29e1d8Xe44l3AJZFhw9UEctab24xgm44qCMlDx+Y6mwy8bc3l6/MuVDXBvS2DEPW2Mu7ItQNeiPwUgVItsZHl1GOp8Prz69ubpo914ZdhqPc4WQvcQt05Vc6F0MJ0Xp4edP6gLTgY4r2Rf0tXev8Up+rqqdROZ9XRgnfQM1odfB5Abo244XFptBNRYw7hY5Ox3ILdYJ2j5fQJc+MrnQADc5Umyjgs2cOGObJMg4/lLgzFuWBYbcwJ+ctnHQWwoH0gCYRPfVEA4NTglS8TVzD98CsJu9nPHOFwZInNDI4O9ndXYZ5YSXNk9VqA/bQEwsqxejcJM12A/JAol3Q7lM/nd+HOWDfpGQXKnJKWY+pE3S2Cl6UvsLPW7wKuyM+dMZXtgNO/rhp6/PiTJaquotue/3q5rZ0SNImabcBrf2qYFY1BFYb1AqNUsaI7DRZsZMmcbBOoJ5ZnEQUkrjdCLCMXYU521KGJsIWm3ln1IVPg2qo6zeYeRcxvF2R6AwtqG03A9Z2NsWqNW+Iuv3bLcDqrvGpsP3qj2zM4TPHAf06Sd5t1hfYc6EZu0Ctxo8oOOvpAE910tnl6c+vcCz6uYexY1jN2OzRA5tuLq4EWD2038p9ovYDaFd+TpX28aHgu1illxwz400rDA2rqkLCgmSF8ftx0+5rkhS0Ez58UtRS7DcVG7GSEhg1BFcWOmZKSGTkDqPSmLlU8BbG9wTmBs/P+bkANg4FaFW9DJ9Y0hodluqxhSV7QbnQEPV6w2op2GA+KFNemGtqSwwVQS9ibqbqjighzC1HbdszODrBpUHmfUHHMJ7QOC3efVme38ihV+KnrlVaPcW454c3SCCAAgW5knQQTU2xz36a+k9diYE7vbgoAhdMJtfHlCHXa966pUzVbtF2xmqjj1KV99Wp110rV9hE0xS1UW0mqsjP8rd03sYFmX6OaoBXsaQ4hssVYDGoEbi7KK2ecqu4/RS1iZ+niNZWUXMjXSuO26LAiIXkjh3e+4Efrela4HVJ3Sbjwo2glbPjrrCVpu4/0VJruGXYlFeeZm75hoyI0/rXUq18INZtZarZO56G4uCUgYSfiHYzao1+vRKX8lqmqE+gb9eQ6EGxQp3SyaezDG+UZkP2BEO0i06UwXIEVUnj3eauEShSQFLl6/3jX5P27u+QbvpaxNToqn9uV/1pWfn0fcNfX30TOe2bsW9Sx73PPtYd936d5lDe+7brsejP12Qxq3pux8sFn+neud75piKA1SlC/VPzvQPmll1fYm4ZUY6YaY3L0Lc/GtBgHkWfTikdtKft1wzhXZJAd4/FGG287Msu7po5NjokR4fk6JD8/Tgka9OPyDarTxlqf5ORsSrxoNT0qQi5ztUl1x3F07xO6eqivKOEr0h5LeAtHL28XaJxI+SBjR8J9Y/YEY0lNXYvXtbci0NVs51TETho+BQ71X3OTsuWh0ElSL2ue5L7OVDZFX145iaUzjEptL3qAC3bqUlWWN0q/1LbKm1+rY3X7XmgmPmjXTBexAE7ZsE2QyrGcXYqnC30Ah/mqUvSubiBq0tNpNXYtrGKx9StBNPNHH3+aHfwMwQb5gPCmriLyKq4DImAAcKdT8LNGq/mjxrXxtq1MTslNC6LNcti21XvG6zW7oKXvhatdcujW9JlbtJd5iacCF/h8sjrgZa30siazvuPsWzjGIsATH/FY8KbmN+i7sk6mDXBN+T9OuS6p+VtV3nZ2/UHVn8YvAXTohsp9At+0ldgg6nnjxr7ba9nh1nBjmtW3Zp1oFXquC5tPANYf3Rbp7DGqTIPq50DtPdqV9UxYxAbuYi5YA2OzVvEiHzBb845QcvlSwF1tSEI0xXV89J9vQbzb9ZsumLCcqMVmpkKUDBedErGYg2SHTeOn
ryHkJm2t//7lm9QO6yh6VAwWk6VXLPRU6/sBnfISBnXwr+QenXf3pPyoqKQqb2wXBPzU5AlAtDzQa0e+q0fFcFKHXH5lVJkG41iwKxuXSgOBKgQSyrqEwqeqkwIPTdF12Nbl1VX4hMeqFCmN+gRG/ZrEbHrblDFLvwo20JcgWoh3TUwK2TPuUweWFFaa0beZ7R8vip362H9EM45i2rKWv8gtZvNAtA2qzusgUXZyc2ErypAtvtdQEh4tHG1mhHTSobZHmQnZbqkpBpX8fkP6Wsob0IZ3Q3PvQt/VY9iargkii8ir0T9LhupY2Kddj0TtaCpbW2/t4alzbm0rR7y+gSOd/HuXcSQq093cZhZUahD/eLy7PXb81fnv56/+fn0wvrEYckMxi+pWaEQBozgIac6G+Mpp0/UCdNCPQYVjEEFf3DnDV/V396+1tr7hQMAYKuwT1yPlIvMBV26lQcE0HGNd4Dl0fARqnyWMGA6o5epZmDbzAs/7CajyUE4DnoldSGcH0WdiaF6cDAXpWjilSQjDLy8lprZvSJ/lG7tQ97TecZo8cM0B9tHrVbULIkbusY4j9BAlDsRO/goIprx0Xpn1JHNlELcV6hyY/iTkUq5yioszTO6Z2zuJ6lMzmqlV+Z6aW5As3qyZczz3k8r+3+65qmJppv4XQzL5Sm7s72xOKsedmOmJOineU6BXqqIVie9T48fgqynQas5oLiXQ9gKyxCUp2yxO9iROrpCgskFNDtMFTSDEU1SVMWPUI9mcRTQl/YqVkN9Y0scAmY4m3VriL3hsjzeh3OWlYmxKJWAuk6LU16sPhylUJ/BMzl/Qo+dcBdYdT8HV+KlRZytYWoNUZVLfSL8QpPKZSbfKZCIQ1XNIPMMMs1P0ZZgYqWZVRMsV36Ui6+8BPOACuercQIaJyDVJ3t3tYVp6IPR62MGEdpt64Ziy0b8aHZgySFvS4NxaDhczKJIX3Hgoo99Mn2pHCFmcpTek09NDuWcMx6R1XuU2Agpd5+EstSnY1tun+M0rdkx08YGm6s4ZmZBW+3IlpajrhHI8VvUNX/IfbTuVf3jhtpzb6j9SNvgb1D4qribv7Wx1oYQbbAJ0zFId9ruP7C9ocZWWwtF7b7K4eKB2Q7HXBhjiwJ6Z61Psjjb8eDluEcy7pGMeyRKA4zpMa2tV2nWcgskY5dklH7cdaWazcnjg2hu0Gw9w9SjkiW5kKNGqRpj2C+KBTr+bt3e2XyYxTxHLFvwCLJFeKK/A4fKw4FbohQ4GoFybCqSVqiLIc5M0MZUqGT5vILnPGZ4wo3eJ1e7UQ/VrILbguOZxztY4eyjqe5ZuGfhBczvQSnkMMxAu9TIfDSTv37vkFmdXdVL1GutEQInXm0/h0RY0xfnZrL8jcEKpQgI2AC4S8K0RON24OeUyPHgqUSZTcuxaKyPrV2Tc1mUaHNzpIyjZGODzpCMazotl2PH2s+APp8byrWFV+m1oJCBb0nYpKzKPgHf0qdwTtbUGfM8548Vo1U3udSnF/l4F8wvjZtBlTO5gbLQXRQn4OKqcV3cQHwwNWvIwcX5MGTVhzjourdsIimIJFVBE4iJJx4IJmHcIm3+h/Q+0dSPo8Np4APjr6FWuz4kfCvyGxW5OKWuoqjrKIoYhTGZSZvwmMzkN53MhJ09ouNt2LQi9Nq+MbdIycSYW+S34VEerz6onvHqg9+TZ7jK/SBaLwx2I19zqi9oltviRcwm1PEaBhyGHFI3YPNQu3C3XLnSeP5EIWgSGh91ZzMsO+sfgaWfpKW2WW2iPFxHxOSmMNmF4oU9oOWmukaCzaltK6J4evtkfo95EPCLyFQrvzWMj4r+MAfyP4Xhu0g2qPDj1npDKy4+izDN8u/9MCIBXZxh9iFtF/keC3kLWop2hShpXgddYu/bYZ8p7G6LbtAtKiSzhh+zY4zZMQbIjtFvMtN11eef2cYcGKrSQ3ltMQM2LBD+oMnnjfO/38EywzEJvCb/+97BwUEr//v+y6ODMf/7czz1/O+49HVOAt8jCzztWmMq+C3ui6i3Fqbe/7p5cynfLTDadGhvJ3SmgiYV6Q6Bdq+hs31gRUlExXhfQbhdYES+btDoNhEE2wNqGlo3v80WQns7oLUVIBKumRRSnYms2YiNXOaaiuxejzzQzYCi/QWjRq2W7bo9h/Z+gtAt3rXURERrmwSKDYbO3oGCokbKprO/58JLtE9gwtrgJwkq1dXY+qWvRXu/9aOdkt1fJmNj97ewk4t7CN6yawi+b95CINoc7pqtNZOVX1JZW49KtkVEfiHxdsjWguuHD61/5i0S0x0R4dqnvROyldj5LUTOP/vuiPlmiLCeRZsgwhVne/ND5jk1SIyuWnnLV92S/QbpyllyM64UXpIjVBKwp12wdwOwRMmWB8TedZK41LPMrdD1z4t7ScfLLusnau/vFj2/OneUYr9Q6FWVd0CtC9RtIMjcnlJGVN5LaSGh1/EjDp5hurfYcSfhR+piazlZBSOhW7T0zBXmWD1gox3RJo7o0PntDMMo3b0GY2ba7XkOlPlZhblZDZZ9kpysonyslthq+/eiHKwO2OrpVaV5V40WXo3EPYKkPQxJ/YJzO69A+5hvY6X/+8tILFmDFXfwj4uwT3kR5hynNq7KPlLM2vMs0xTpYs0iW+x3LJXpYZWBOMq0sAOkhFXuPstSwf5m0sAqpZNdhdkn9auWoPQGy37pXr22GdF87FK9KoRQ5luVp3cVpna13N2XpXOVpXLtuUgX56TpfWxSHnjZNc2KR7L63spxU+lRU0mnsFsACkr8hteEY0bWrSwHmxt2olymz7EUUuUtFeQs1W3ECRONGiQzlbnsuvlJh9zvddvpHXz996y5ec3WetVVXeNCb1zojQu9T3qhpzfheqQU7JhO1rlY5RafMgfrcPlXOyKYp/5ULVqleVh752AVLm565WUVYrTI1apc8JmeE3PLzypk3Shnq+MCT3xQSerykOxCD5ybVSqLPCfrIPlYO/WjyM/quO8qWwl387Fuf627ZUVprBklOVh75V9VOuCkeVcHybmqPmlBZ88hTuDJc6wOkF9VcRBP8umP7DQYU6tuy2/QO967G3S6tYDqIUOpJXlBTRKNKvOGGtWZMBOoPq2oIk/oGHXcdIJ8vITAYwTy6BNhz+gT+b34ROS5q7aV0tQx5FmcvmpLKUyd05eqlg6KrHFDZ4xTXK2quYJDFnRKscozxA2eHW4bEkgXJupUpNtJQ6pYJTmmH0W74nUI+k0pAEKB2QBgzLtED9ruMs2HEzDjGRonLs4y4fAujjd2l6QJvy1afJ2EajzcbdJMv5L+DqFwqCaPGSuS0WG9RtKf0xef408YXYQE0iSpVGxmBdAt+M+v+RRwRdJz/+nzrzD9Z4QmH45v7J9FZATMYqskAbxi32DK/SBthNaL+7RRXlsvTXI43TabkqNj/OGyXXwjhcfPcrIksndP1dSIZQLFNej2FxvRxhK8b0puOk6kmXZ7Z9lllSH0vlh0b/NwIousuqrUtI4hPJZxSINk0TXMoOsSWJcLTnb0zpgr
SvOnzpYrZV14A/t2MuRuITvuaN/89uybnpsBhisE18y221sOWGayNeja+uBXp4ySW+3Nep6dskc+q33+DAckO4w+y4HJ/tlnhQavxBh/9uOZwoOT1hlme0ylJlllx3H/sXkedwXHRLGSMlvaHuymRZWkRNWE8X5i5yM/5dS2YzjtuHVUdlZpvY9bR7+drSNxttdBMr2qbDKjDK+9srtKrw72pJldXTxVsmyu28jkamv4arK3biFz6/BZW7djXmuztA6UoXU73Csysg6ajfUZlwmyqFiBYhFmXO3pHhNmWB0ku6p8XW2SVXUrGVWf8zKpT+I4q4mTYdhjv5KRo86IqsuG6pwJVTNY9RlQe2Q/1dBWZT21zXiqJKXKdKrJcqrNcCrNbqoNom5ryD+yt2RMbDpe4G1DSURlvMDbNepcknZz0FSf4/3af8BI92fLrDxe6za6J/kzuid/L+5Jec7C3re6bSfNqNTklxwbd0stKhVLnMFv6HSiUvLSzGvWKUSdHRGCi5UHTBm63SvI5elB26lB+54N/1jDSpFaTzpyTNJ+2qf8dO5gv/04pi0pCda4Hd4Vjb3VGCvR/s+zB0/J7ra3Sucp5VrlueuTwlNKUB8LJk/buY1JQ6VPnmcG0dblcLf6O6TilMj4CbsZ/9hJOD/io8//GZB1lDxRL6hT9k9N/s/9o73j/b1O/s+Xx2P+z+d4uv79dbb7ULjyz8u2FzjvTfzmuLRp+M6r80odRzk7ikShRs9/6flPCZ7F9jMGyFOs8pc02pFRKR12k0pvXxN2rJBieQixgX8MUe8+saOm3tGkHclCwzNe12p/0Mpzqb5CEXH+Wp4G4f3v83syf5dtVrvrbN3g84s1bp54f5rdcqSz74AYumO9HQSmCm7nS2D4n1527x8cHQMWZINnyS0z3NY3sWd0ckuvkjT/Mc/XlazUUAOG78mGSgrTuI/NsoMz3o4EaJ1gJ99BgobUZJios48w51B9fmWwUaONB25lt2FSdw5hsNHF1dlJ4wo2fHlJ8sckfdf9cHVx3nyZkfkG3d5nsFJppCxbZD8wZ/bx0dHLyjZLN/Gp4stlEl/DFNA6u00/vYU2aZbhSz5QqpUXu9zxnFZrJ16LlREYrqhNtFOvdvpup+wO9M9Gu9M3V6BpWQRWQ1PQb7N1+bHeSNIa8tiZ7as0fIDF9JK8yuZ+5HNnVetOvHktXXTr8FmatGz7qXf6+nXtzbogEHTx4qWNb+LoCSv9ewBifplW9fvpMqubrtMp1vK3TjMTe/hIB+NaPPxCehR5EdI9xiQvocpNkYyQgEPlT1/WUSN7VXFksrC+vZ1T7o0Os+olv0BSQhw+Smj+m3eRtw7ofuUto+SOnqaBcn7u4ehjlwjg6dFibGbYn+nBeerzpMtpHKD+nEZKzXb0ldKSl91vOWUnuafM9w2V8G2th5qgouoL+oeiOmkdNLemdmtUyi/sx4zfG3rBKw0QC6EbNesgnB5RRzOXfbCa7JEMozIjMZ4lDtSs4CTybcNcqGHAjxpJwA4DBLv432x37sP/ZvM0N+KPlplBgXdEV18+gDSJwAvXemF02eQ4Y6fX63OLmAUGPmXgpeoQGqW7suo0ICzrvb2YsVRw0zrTnJaurluvXrx4ge4X6D1L5i4jQVM/yRqqOcKRhTWU/Znf04E7Z62KKUCmqwpGrjI0CPUdqYUueEBdGHyHpvBN+KHTbAXA7A4hphmAKJhTYXNl7Wf//TVhkRcy3lb+e2hyCqNnTozPmjuwL1DtYkI3bR1yWOy0M7sKNaQyAPeKahayb1bnpnR6CHANY5NeE0RSdgWPiv20Ap7RW16MeFeRGIbzV+KZTso7nxltuReQGYb//1yru02d97+vzXqMHL01z+XCmaefukqSSDhSS8AZvxgCLIgkmmlGqQl6d54NmLVicljubki+WStYy/C7CVsdRNY8PWbXySYnRy/1Gvkxm6YM1lIhm9Hoz7pCHYt4N9PGhlTcudfq4hrvNqrYiMAgbMsUsYRxvR42JDII8yItLGFcrYQNkFsz/GGTEiNrCQEdTCUT/H2ZVozKLtdmY9KIgivf2vFYcm0zGg2QD8CwbCQKWdaPQyMCA7AtGoNCltUjUIvYiVV0cgLG8zgzHIZrVsBxOBqSG0gU3eAUymI+SE0J9pTGbMjWZbEduma0hhNDOZBlgpgNaFN6wwkjHd4yQfTD3ISOrQB3YRycBkFKsqy9T4cs4+epz75Pcc9MzqICky1T9E6C2zxqc0PfT/M8knMhKmpNHteVi8g3M0MqaHvlZ05pEBnkik8mhJHWs6DVSwydxmsJYaHujMkMxb9E1Skk0Oo5C1JDSSHQcQoJlArOkIQ158lqjVGR+lHMAS3Hrwn6vjwrRm2HabPxakTAme3CMWdgO1YCOHu+bckOKJZJw7i7xK0JDyCZVsMK5bJRtHY0hxVJpnS1Qul1ry3dYQUT6WGtUGp1bEPPWRhzP2wljqtD1pLocDKZaAlnT60t2f5imasINxeuHcVB5dHqB1ffri3VQaVSKgcXp68NNWdJqFvLyoJw8wZbERxKGhOV4OgmtiPZVyBzZeDiP7ahNqAkWjXg5li2ozigPEoFYO9xNqfUSwYLD3RLGmdXtAv9gWU01gzuPmonDgYS01JfODqvHYgPL5+ZFnH2ajsxMLyUet3i5O62Juwq2ZnM01wIYeJyVuBy5qtyh9kowF6+aGvSgwpnoPn6OantiQ8in7HK6+G9tqU7tGA6XdfLrW1Pe2jxVErO2d9tR9NVJE0YYymGRRCjAWpnbkOaE65xuzmeu5FyzeCneP5/Sq9ZoFuEUMSAfT0tZzHi7CzyM3mnocfdEMKATQGuHnydk8jvnEBp8BUghBFfHVw9+MJedE2yp3h+RdIwkasSHGS0k6YUerqm4Eb8qmj0ZF09tgqWDceVGKUzh+nT9SaWM5c+TdNNbMBWB48rRz8kyTIiVPEFNsbNkpZjO4zO1o0F8WHlM7BvpAJaGTg25IcR0djEaQvoYOOYUx5cNp2Vo5LO2MyxoT64hCpDRyWdkaVjStVVqgvpcdtShuY5Yxd0zszFi+QuSt5breZCXsh9LWdKdkCxDPScWC4rJWdMeADJjNVbQy4H3WZIc1iRdFpNKpSxSjOmO6xgKmUmFcpIkxnRcxUGL4NmiU3O8MIWjTmMtxdO2T2JU3rBi7FBrKXjKsAlyaNw8WSl6WJWxl3RGRIdTiYDNScUykrLmZLtL5axjqsL5aDizCgOKo9OwckkMtZvplQHlUql3WQSGSk3E2qukrxZE7xTZf7unOAln4DfXEUkRdlpUBR2UxWWTAwvq4HqUAprpUJs2RhOXGOVIhLWQbXYcbAVOXWqRiepscqx5WIr0qpUkE5SI1VkQ91ZQrzbOQz0XryEA9q48lTIXRk29pS6eEm34SLV1qxFhQ5akY1EllLmONSUZn4w4FCGtS+b5i7ygmU3R7kRvUGEUXeKphCG3UOF3pVn43nMYc7axgxlMR85zT3bmWkM5xXrOWT4GeOaXhlmbriyK8bcbFUDWsOIYWCTtuW
wMkNNqPWTxHyYMjlcRquW0GAiaEduVwjzAWxAbDBBlIO5K4TZmNYQcWcebwcONhFRb5OnJZzpbrkCsyuzN/RGRP1Eym5OtJpGFajduRVc/lTjUXn1kxSJMzd4kVSWh3N97RWQVvWnQu/KsypI0Sg8caDIRAyYoam+QXtrq+9DDdimBnVE7JnHv7ps4lsVN+1SDnT5ndYCLVh+UnIgKm/NxnoD9vQijLotVX5RMCEsbctDUIWLqVYxjUA0w6WLAeoe3Io6uEW4nAKTNVMsZK288xaEBSNqjt86DPLIuDKxBJhqJbCCWTMKjozr4vpcwvkMUFtzKwnfM4jaExV1IC80PwyC80RFHcjf+umS5NI6yOlng6qQ4HHgSKs1LKMCNSgdORRqCrPgPwkGB0Zo5qubIsm8kcrlhaYsuevMSQXbEh5MMkmlyyXSNoQZnZ4CyHVMk3MjnaNB3JtVtULoMGysIIyo9OP+1Xt6Ifw5zR6r5Z4w6ClLNmvMvJxIP+Z/6mYLaHOM2QIMuWxh68earVJxVybb0yKm6sNabQyvL/jYwMRVSUozUYnyOLQZLwYiKzal2Y2qHAqDkB1aLFrISa4HLOkuWIfwEJJhbV2lZBG+NxQJG2e6piXsRBFTGkqGWhI6IxEovL0AHTJD8E83YS+0CopzT7d5lbl4DGkMxvobmmfUin2WmtRBhC6tIcS4Jn600s69XIKUAtvx3iUwBNs3JL9YxklKaNWY8p/B/0NajDWGpSgKokPIpFqnCKQxXbuY0XEQoNgGvmapHfG1+aqm3LtPq9Kuqxs3RgaXWNJ0ekm1TWhHdyDB5KsgsURGqyFDQoOJoF4dSQUxXiVZUR1GKtfB1X9QbX802Q4j5+GzvXFjvBoQCuS2KnBiY1viylYJ5vLqVwtujAwpsXL1oBLVdBVhSXlo2WSrCp1o+tWFHdkh5ZLauiqhDGxeC4JDiqOyFxUSmdqNdnRtBWMRMVd071Kw3cQ+T9fFdzmnCkTWPCn9kcb+RzkaW4YWCRjtZ2nwdh34eaeZ6VdQncF0Q7/LOZLjseVoWR0UNrl/v9e1ABa0hhFDHmjX7/S/DbV+kugC7Xoc8jcnNJgIkkC7Xmf5bYgNJogg0M75yL4pEWvmU5h5xSsK+km7ZpAgsOWjOP5rol7cD+GbUhmAe7lW6XHW3phODwF0ysT1SL0hiWE4l+gQ95PzxmSG4V+gOtwOyBuht+d5iTmFAKeh548XoKy7ePusCA4ijVjvSKTQKR0D/D2YlnjsGtzqvXRqhL3YU3jj2kyaeeBMsLtzrI5PqHNsvDYwQe7OsCAmoc6lMh5BisWdHRtl4KYEtjP6TYa91XAfdpybeRjrzDp4FW3IDSmK0HuolUXjMbQi2FcauWdQIIaRN9CMwhB8C71+ErY1nj4j9H15lsQPCDjWxw4Y4B6EXXHMgJRlXbyAIY2+rIt9pwKudf5SPeK+rGpiAwQ8W8QFWBDrK4fUv9uVwHI+GtCP+65rcijNjHf9jIt3mzvC3nSoll8UxIWl3Xng9xSfk3WUPJ2lQaevVaDlfcgBBUa3raKXGZJw57yrM2usqrSkDIU7Kz+HS/buQll/qwIMlLhRzcnw2nIaET8j5/yvNoP04zTgX+VsSZE4cSOy9xgnaqNOWNiJg2sSk8dz4gdRGEtYSREEujuD0TAlxefIHYuUwnv0ZMwxiCle0qflTYzNkbU8fRIvhwrOMNJLtwRS4bLmy+RWQ/vLDLVo7flcviYPpHPUFt5D3cEHFS/dorbkcZ2ew/+7d9hWX+QciEvb8hDLbHQDg1xY1pkBGAx+dDqfg0HxJo46NkB1tjFCwKlPIacJgBpwqEBuzbD5jZfOF10a0ujPunwrwf0+S1Mq7tzr9hEcr600IzAI25JNBOfbKU2JDMK8YAfB6RJKE+S2DCeA5DVOZW0O8QObEOUMCQtbc2B/7WXv2y4taQ4nklx/9L/U0pZqf6l0eqXn3ZV2BAcVR6Jvel9RaUt0UKEEeqjXTZQ2xKwF0dw8aXPhpAqXLV/raLMM4+8Fl4WwL1P1bSHi4tZMaLZ1bLZyVLhc+BI1lkEbiYpak1fdWWl6VaUUiSs3Uk4MmBiGAa2TrLpO0spHZkbAme2uh6ziU+UgkyBw5kPlHqs4MvSOabE6s6nb63W7+NQIfS+ehQrD5n5TJTZb1lKNSWNhvqhQ9WBLYppYmSFqdD2YE5gYxuaEHI01Q2aXkLrcPWqAuh+38hWC0xWjJsjdGNYZ//Y3ierx9uZUNngcLgw1wd2bX9F4srwXVIfTjUfmTpyTFLOAzQF5ponYkxWzCd9zID2UdNZi9RBnGDmUt7WaX9KqQGTLU6a+lNXiLlYFJnum0geSXiVp/mOedy5hZV9hAZPm03v4rmJJhseFo3BOzEN6eQHnkF4rgoNII25+iRS6rmCAvwfTkpDeBrf6kF41wl7sKUJ620yahfSaYHfnWB3SW+fYOKTXBLk7w4KQ3jqXylgbKRZ3dmyUgZsS2M7oNxn2VsN92HFuFtJbZ9YhpNeG3JCi0ALWsjxgKTdhOgT7SiMP6RWIYRTSa0ZhCL6F4QIStjURBEbo+/IsCekVcKwP6TXAPQi74pBeKcu6kF5DGn1ZF4f0CrjWhfTqEfdlVRPSK+DZIqTXglhfORSrj7YElvPRgCG9mSgfhCYNRKeMNVFd2gerbA9KbLassfZo88PeylnolnKjqzMKHAwBPeL+rAonfZeJ3gC1G7fySd1mIlfhcudLOGmbT9QKRG48SSZl44lYiqUHO+JJ12qiVWJzY008qRpOpDIUbqxoJk37iVKL1o1P7QYxA7PbHTZA7catbBQoN4VFRd3Iq7aDOSOGe8FqfG7cSe0bC5tGgcmaqW5eJmU6prxfGqYHGA5h94wIfy0nKyhnS/qDQeYn24RPOpwCHl+8eEFfLQmoBj8ngddA2Sia+vGSeH8Kg/dfeX9aRP7SO/m25MQPghBL+NFZHUFTJOCcFdyKAx697tlJjRwUZBH8CH3iOfjiAWma5Mk8iU6827MrGYckfqiXgj9PGqB58ld/FTXA/+nFYRyQOPe+0dRGFD5A42TZVZrckZMaa7jB8APJ66+AXz+/P/F272FKyu8/ND+514Ln4a7Oipx4P97eXtU+hDG0uh/RvZ4bPMcVZCfey70aRB6uSLLJy49HKlmLUz5ZXShBDZZwtXrc3xP0oSStpN1kebKaPSTRZkV+TjZxntXwVVtrxVZuHVu9UJu1ek8VkGjUo0AUURmZUMI663ChEwaHxgoJXbG+gnuJ2e7cb7Q3Gq0nHv00bXySjNfGK03lm9Y7hz6RDTudoFO5FGxHsDl02LtLWgRGSRjTzQFvh7zPSQqKjeZrWvmxv4SREOezxSaKkMCONwOqja3dOjEDBdesnBagqr9kxnqk84J7Ik7nc+wKrlJPhAIt/TTACQV0CgnAzoJyYV7b0FinYZLCG7pPxEgXRabZU5aT1fQ/9vZknOOLxzC/L8nFSUBuSATLgaR2WKv+VqyOxZXXoteh5i8WqPVq4hRvzKnoaORJRJharnWH2ksnSp
99pEfSk3ahmdcRBjDswodkWlrBsyeQxpLGHjzHh4f0X3ja/+4d7x1/tv/ycP/g6OD4GN8fHOzvff2Zt7cViVsPjFg/9bzP0iTJVXC677/RhysHH3rhF3GSS8wPpiUWIUm/1OmRzmf2o6Fo/HX4C7PSTzz00Zaw9ALH3Yd9P1rf+/uTdzBgTrzzyxu6OJ6sSO4Hfu7jAGOTB+uWZMoQTAsa/Dt11zCj6prQ40+zhhcnW5M54kqYJ+XEo5K0mKmZZAqRhGACBevPc7AYT7w83ZBJNfw/Xvvrx/99Ajo/aCx3LHWAbvwf7h20xv/LveP9cfw/x1NbOxb9lzU45hm94g2OfXc6ndqP2x+bqJ4EQxgHTswd5sWAzBqWQX0WLb7UZtNjNrJo53zqlmDv2/CfwMz7aTz68b/O1i6Tfu3RjH8Y9IfN8b//9dfHR+P4f46nPqTZUIFhfAejtBjFV0lwQ+YbXBGoxrD90oTuV3Fj+Z5Eq1l2vzu/97kbRIuPgtbWOf56Ta+rSWOCIbthsmvOW2vJ1EUV4tnIWGRMyMsw/MH07qlZ6oat7Qot1FyKZ7yqZ4DPT1dJGYjeAYAfmEO+Eajux2DCFYuQ1rJPirkSgL6uALEXtGTyoyh5JAHLD8CSZ594n6ewRg1XZJdHen5ujq9IGV7hO/EE+GraWiaVqDqo1wDf1oTHCcpIqCCZvyNpwcNXQhmN0LvJyH8WMyLn8cxf+3dhFOYh8vh//i984QhOA9FHWMs/AFHohifewo8yUqC6Kj68yuZ+RLtMBZGSv2/ClATnabJuIJ2g2+b09etJw/0z5c4Z+ImmwyXJH5P0XYUOX15cnTVfXF2c1whu4tPsbUb4hJ9uIqijn0EzX+MH9g5tFK4tpmD3A7vHR0cvD/lae+W/r15k5HUYb97XkVFEpzGuDLLNeh3Rce9HP6TJZp0NQ3WRUWzDIEuJH+BlLdcw6+AJY+Z44euGIfW/fv5Pk4j0MwA08//+8cuXLft/b39/XP8/y9M269M7fz7zN/l9koYf2OL/3TdUjz0UFsE1dIihjACTZfpoKkBBVChQA1MoHVZaa+rt7ExaOzNTvku48tdZ7Rub1yZ8p8/Wf467daAIIugmqPxJesdxLanen3osaY4DfzVcNJJJgGOeJGkQxvXe2EVKa2w4hHWMz1SFj3gAtVaZzzL+zfT/HQz8MF46TgM6/X9wtNfS//uHh+P671keR/3/HesQ4zTw3NMA1P01WdAlAddpijYDqNqU3aOFsg0N6KMTEMN409ih3H7rf+xx8nt99PqfLe4aO+iW84BO/788bNv/L/f3D0f9/xyPYdRGe5ooJ4MbtvQfYh4QBGmYaAa8A+rEe7P2/w5L44KHuT+btwKqGqLRqJMZQsEPgV9JDFxet85+G6DnJboOrPpeq6Aoi/eaVVEvH6cRGBt9GiKPMpOW4PIWrcHKGVQxLyeu5nGDR/+Y6H92mMp9AtDa/wed/d+D45ej/n+Ox1D/S9QO7RkNvaPCx3vSrLZNIdi3aG/gmmKqdngPGdZGHNcg+tAyroQpRJkCK2RAKGCe+/G9nbMIuiRJL652io3tMnK6iJnkRYuAvXbocIMMfq0H9zUjpps77R9xRfQpxML8ER9j/e+zld4W4v/294/b/p+D/ePjUf8/x6OwKRsL/NHR81FCA6p97nprnConUMDYmjvlpf/p1edMwTKFdMMaiwNWO3SC8jd5Qg8sNDvMbfKOlFvKozH+yT56/f+w9rcb/7W///KoE/91OMZ/PsvT0jPY2BKzfwdHOkaL4EZQuSeww6eLX/DOQ/h4lQSnHE4Yse1ggQNPloZ3sWF3VYsJrZ03ZEGtxTGN2lFEFpzz+Z8/nxQnOFZhfMpCb+rHkebrTcPgxlqrIGfwuXmkawUGefqkKsIguOuDn1tfFEZ51QYwlWS4EYPveZAtPYyN1UZf9pia2aZjvcrYm5+TgHRYrwPPKrjR8/Jbe2T6/4G1dT/Fzx+1/j842v+65f/fPz48Hv3/z/KgCngDFl0a4ijf2ZkUeqF6Sa/ELIMXeCdJJ5MUVA+o/DM09068/ckkXMEHpn/XSRbmVOeRzWw5T3GyKE8RrtMEdxN3JYhRAy5hJbI32z+YfQOv0R2CjhDgjKsn72JxmeRXoOdR8U3wTDcGPE4aIRwYxkgy7juhKnt/b281qenj48OfQ1xu4F3KNbCDFtj+wTcANwG9hzB8euSxeN0pgqI4YhhKFEd7lJJax+7gzLkzmTQOR3r/+NekfrgQ5SzPNOLHCYtyObs+zwp7e/JCavGfTF7w5cI78rR/wn7Swb5f/3JQ/3IACF94yZpdIRA9eezOEG+dkoAAJ2AriM56vOgeJKHUCy8WCzenZOuOqBdszmQ3hfKQmxfFVM3ezpLFbPU0W4RpltOLFWbzZCUByujZdgaVpMsC6kUjN0hJ4cXU8x+zaZpscnL0snyLpS/OG2Af9g9eHh6xF8XZE/4VoTFXIznz5/fk9vY1dKl7aKf6kTps/Rce+ujWOd7lXuQzhDZ8e/nz6eXpD6/OGUj7evqTxuvqFnj+HjjBwzjQIJ2L7aUQCiTtG9xVADz4WgXyn+uCRNklW1f9tj/L3t/gHWn85WN2zRpMJnIHoCNxCSERWPS9Ka8IohL3wyYlijZpfe6wx79LmOt+bbLW/d5kDKPB4Zso5aISSshmBabgVgjUZVoIVvKOUWmnQYBpuPFWDPZyTsdcHqHa3mNvsBsuIl/VAEKYtngNILFwMpCGaDKgUjA8VAB6O5Ax2/rc4bP4XgxBabsqIA2QSqpACdisCCVotzp041wOKJVGM+6VcGJZlHqghKJDUt8uLTCpGBxOI0QXSixCF04sgFZvaKCV4uj0iB5YLpxarxSgZ11VUnypRrBWdiGsTPIGsFpuGahQahlwR+b2bOwdND+zK+D+VrdwwnhZHeWpgfLL8cGuZZY/mPs7bQh6NxEa5ln7S/vmcry5SAjE2NxvfkqfrjexiKsfkmQZEVodgbbpxMCytmtCqxtPCitsPSl0p/kuyosaTljofe1TvEjuouS9vscKIGUy10DVAosBhdKKQTuivhZlRG+CXMpTSesAZQJfSvMkG8AJxRVCdqR9o89ya1pAJlq3hFpEJbxQVGWJrsitVJ/tQd7REJ2v3Vq5aq76RJ8MNM9VO0tdW02qK05XTapKuRbnLFMBydr7WpiiSwMjZliQlqoN0cg8RGfUZp3eNNMAtWuULfdgomi+bd9M3aQqnLv/JriIsSiGC3QaPlqd1TnBA7Ps83qzZod4S90aiHPsnLQ/VjT4tavltgUUg6aZNw/glmCqeTaQzq9BZ14NGslfKvZV02zQnV7hFUwv6dMNdbJJEgw1MIjB6w3cAlIJ1YSTCFQDamaQOfGWdBqlvqIu8E8YbCpEI2ndDkytkRvfxPlBJMQk+TeU0NV92VowvoOmgHpTXIakg2F3PMsFZhcvK/G0Lj9u9e4OeEc5AFDhxrtmzuiWQ0nYbOoijVYUgRY9VA1T750KSBsmTbiT9DWrEqzDaYvUep0RLOt6WlDebbRwL
d3NvPLskgVUlOylWgEsEuhaZ2nwlvrg671vWRnccp+RGKg92zahxLOtFKYx20qhytl2mUIVF1qYzXphYUxLxRBAtGWogYgFEAM0uBeDlKxDm6EfT563rppWhKB1tVADkE4nDRjRVFIBaKaRCrA+hVRvxUO8+b3eGsV7zbShAmxOGW1IwXQhAmlOFW2I9jQh/F6fItoAremh/VkxNbRBO9PCu1o7YOAdt+u6rzoX2tfJVGA1MauX9ave+WcakXJebPCU/ZW+rtQw/fOaxOTxnPgBKKvme7aBiYvbxmuYCuvdJ5IvfqNk+Zo8ELB8ccjRV6tye4mB1GJpuJHafA3U/eiUxo/jJST1Wol1S+ouQFubxJoltPB7Q5cIIUpVksCfr7HW6nwnputnOWBbji6kWB4lXEMuJWQlX3tdTN+uo80yjPGuGP63VH+uW8OluRNa9PQyE3v3jXLYlFC1UVO+Ewya2le5yu+khS9WJKmwxlNJ/aaC2pQkARd9bHcAcY5rybcmI9IFs2FaZDVwG0q29m6l3kXw5kXzJ943e9/scVhNYtmqtaRpXIse182g2p2gZZlLy24jyxTamaAbiTsFxcUTdDc5Zfu9ZoLW5lSUQgomaGmyQClEe4JWpMETC9aaoDV53eoqQJpHrWr/ujOnk16M8cPCIEv64jRbis/NOm5nlhJ8aNZnLqzFTo6jOq+tGhMm+KnXkySrThekw4BQj7ZSvSBned0BxuP2GceidCQnk4kkUwiLrioCuBDDQxkWjMTZF3zveXN+fo3fe1zcHk91SxEjFZYfaXGvVPD0xaS4Tw+BW9cVVpXTvPGvDH1iV/DTyKgX3lOyAePnyXv049zLE5ZZwfNjr5KT32SHQdCnf7n59e3Nq1/Prl+dv7q8vTh9ffPr2Y+nF5demGGfnbzgyDHQp7wA70X9ZF6Oxx6KOKAij8GLIg6YG1ss2usQmqX8Uk9vsHqalmiauS3kpOopI9iHSX1ywIokzYp6UWYZ4Hj9VpAYjXMKurf8zpkW5q1aRGt14eBVzO5SxFCtJJnd+Wk9TusFzljtQutNuk7QcuNHZJTArNJYbXSOreP5SFqYncJ8nfgwuUc4j6f4nh4f53LO/VJgfA3AN3v5bDaDYXJ26m0yEqCrADp2iCdlaMmSL7rWqOATMJfBPCUBlIEeJZugaw5m7Fu8aYpIOJyBqyaodw2RlLgiWhT1xUt7NWJIAGWQidhiuVUS7ylotUVDZFsE7LDq1/CM0eHjMz7jMz7jMz7jMz7jMz7jMz7jMz7jMz7jMz7jMz7jMz7jMz7jMz7jMz7jMz7jMz7jMz7jMz7jMz7jMz7jMz62z/8HdsP8JgCAAgA= values: image: tag: v0.12.8-dev @@ -37,3 +37,5 @@ spec: type: netlify-dns - kind: DNSProvider type: infoblox-dns + - kind: DNSProvider + type: remote diff --git a/pkg/apis/dns/crds/dns.gardener.cloud_dnsentries.yaml b/pkg/apis/dns/crds/dns.gardener.cloud_dnsentries.yaml index cf98738d0..4a0233677 100644 --- a/pkg/apis/dns/crds/dns.gardener.cloud_dnsentries.yaml +++ b/pkg/apis/dns/crds/dns.gardener.cloud_dnsentries.yaml @@ -102,6 +102,26 @@ spec: required: - name type: object + routingPolicy: + description: optional routing policy like weighted, geolocation,... + properties: + parameters: + additionalProperties: + type: string + description: Policy specific parameters + type: object + setIdentifier: + description: SetIdentifier is the identifier of the record set + type: string + type: + description: Policy is the policy type. Allowed values are provider + dependent, e.g. `weighted` + type: string + required: + - parameters + - setIdentifier + - type + type: object targets: description: target records (CNAME or A records), either text or targets must be specified @@ -139,6 +159,26 @@ spec: providerType: description: provider type used for the entry type: string + routingPolicy: + description: effective routing policy + properties: + parameters: + additionalProperties: + type: string + description: Policy specific parameters + type: object + setIdentifier: + description: SetIdentifier is the identifier of the record set + type: string + type: + description: Policy is the policy type. Allowed values are provider + dependent, e.g. `weighted` + type: string + required: + - parameters + - setIdentifier + - type + type: object state: description: entry state type: string diff --git a/pkg/apis/dns/crds/dns.gardener.cloud_dnsproviders.yaml b/pkg/apis/dns/crds/dns.gardener.cloud_dnsproviders.yaml index 4f49d55ca..0a4a1a883 100644 --- a/pkg/apis/dns/crds/dns.gardener.cloud_dnsproviders.yaml +++ b/pkg/apis/dns/crds/dns.gardener.cloud_dnsproviders.yaml @@ -105,11 +105,11 @@ spec: given type properties: name: - description: Name is unique within a namespace to reference a + description: name is unique within a namespace to reference a secret resource. 
type: string namespace: - description: Namespace defines the space within which the secret + description: namespace defines the space within which the secret name must be unique. type: string type: object diff --git a/pkg/apis/dns/crds/zz_generated_crds.go b/pkg/apis/dns/crds/zz_generated_crds.go index 566d43d90..8f72df450 100644 --- a/pkg/apis/dns/crds/zz_generated_crds.go +++ b/pkg/apis/dns/crds/zz_generated_crds.go @@ -240,6 +240,26 @@ spec: required: - name type: object + routingPolicy: + description: optional routing policy like weighted, geolocation,... + properties: + parameters: + additionalProperties: + type: string + description: Policy specific parameters + type: object + setIdentifier: + description: SetIdentifier is the identifier of the record set + type: string + type: + description: Policy is the policy type. Allowed values are provider + dependent, e.g. `+"`"+`weighted`+"`"+` + type: string + required: + - parameters + - setIdentifier + - type + type: object targets: description: target records (CNAME or A records), either text or targets must be specified @@ -277,6 +297,26 @@ spec: providerType: description: provider type used for the entry type: string + routingPolicy: + description: effective routing policy + properties: + parameters: + additionalProperties: + type: string + description: Policy specific parameters + type: object + setIdentifier: + description: SetIdentifier is the identifier of the record set + type: string + type: + description: Policy is the policy type. Allowed values are provider + dependent, e.g. `+"`"+`weighted`+"`"+` + type: string + required: + - parameters + - setIdentifier + - type + type: object state: description: entry state type: string @@ -830,11 +870,11 @@ spec: given type properties: name: - description: Name is unique within a namespace to reference a + description: name is unique within a namespace to reference a secret resource. type: string namespace: - description: Namespace defines the space within which the secret + description: namespace defines the space within which the secret name must be unique. type: string type: object diff --git a/pkg/apis/dns/v1alpha1/dnsentry.go b/pkg/apis/dns/v1alpha1/dnsentry.go index 4036b6ded..5d9e2be61 100644 --- a/pkg/apis/dns/v1alpha1/dnsentry.go +++ b/pkg/apis/dns/v1alpha1/dnsentry.go @@ -76,6 +76,9 @@ type DNSEntrySpec struct { // target records (CNAME or A records), either text or targets must be specified // +optional Targets []string `json:"targets,omitempty"` + // optional routing policy + // +optional + RoutingPolicy *RoutingPolicy `json:"routingPolicy,omitempty"` } type DNSEntryStatus struct { @@ -83,6 +86,9 @@ type DNSEntryStatus struct { // effective targets generated for the entry // +optional Targets []string `json:"targets,omitempty"` + // effective routing policy + // +optional + RoutingPolicy *RoutingPolicy `json:"routingPolicy,omitempty"` } type DNSBaseStatus struct { @@ -118,3 +124,12 @@ type EntryReference struct { // +optional Namespace string `json:"namespace,omitempty"` } + +type RoutingPolicy struct { + // Policy is the policy type. Allowed values are provider dependent, e.g. 
`weighted` + Type string `json:"type"` + // SetIdentifier is the identifier of the record set + SetIdentifier string `json:"setIdentifier"` + // Policy specific parameters + Parameters map[string]string `json:"parameters"` +} diff --git a/pkg/apis/dns/v1alpha1/zz_generated.deepcopy.go b/pkg/apis/dns/v1alpha1/zz_generated.deepcopy.go index b0d0e38c8..7c21204f6 100644 --- a/pkg/apis/dns/v1alpha1/zz_generated.deepcopy.go +++ b/pkg/apis/dns/v1alpha1/zz_generated.deepcopy.go @@ -288,6 +288,11 @@ func (in *DNSEntrySpec) DeepCopyInto(out *DNSEntrySpec) { *out = make([]string, len(*in)) copy(*out, *in) } + if in.RoutingPolicy != nil { + in, out := &in.RoutingPolicy, &out.RoutingPolicy + *out = new(RoutingPolicy) + (*in).DeepCopyInto(*out) + } return } @@ -310,6 +315,11 @@ func (in *DNSEntryStatus) DeepCopyInto(out *DNSEntryStatus) { *out = make([]string, len(*in)) copy(*out, *in) } + if in.RoutingPolicy != nil { + in, out := &in.RoutingPolicy, &out.RoutingPolicy + *out = new(RoutingPolicy) + (*in).DeepCopyInto(*out) + } return } @@ -1050,6 +1060,29 @@ func (in *ResourceReference) DeepCopy() *ResourceReference { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RoutingPolicy) DeepCopyInto(out *RoutingPolicy) { + *out = *in + if in.Parameters != nil { + in, out := &in.Parameters, &out.Parameters + *out = make(map[string]string, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RoutingPolicy. +func (in *RoutingPolicy) DeepCopy() *RoutingPolicy { + if in == nil { + return nil + } + out := new(RoutingPolicy) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
func (in *ZoneInfo) DeepCopyInto(out *ZoneInfo) { *out = *in diff --git a/pkg/client/dns/clientset/versioned/clientset.go b/pkg/client/dns/clientset/versioned/clientset.go index a9f001af9..94e0ef416 100644 --- a/pkg/client/dns/clientset/versioned/clientset.go +++ b/pkg/client/dns/clientset/versioned/clientset.go @@ -61,6 +61,10 @@ func (c *Clientset) Discovery() discovery.DiscoveryInterface { func NewForConfig(c *rest.Config) (*Clientset, error) { configShallowCopy := *c + if configShallowCopy.UserAgent == "" { + configShallowCopy.UserAgent = rest.DefaultKubernetesUserAgent() + } + // share the transport between all clients httpClient, err := rest.HTTPClientFor(&configShallowCopy) if err != nil { diff --git a/pkg/controller/provider/alicloud/state.go b/pkg/controller/provider/alicloud/state.go index 511001c50..d82e2bd47 100644 --- a/pkg/controller/provider/alicloud/state.go +++ b/pkg/controller/provider/alicloud/state.go @@ -27,9 +27,10 @@ type Record alidns.Record var _ raw.Record = &Record{} -func (r *Record) GetType() string { return r.Type } -func (r *Record) GetId() string { return r.RecordId } -func (r *Record) GetDNSName() string { return GetDNSName(alidns.Record(*r)) } +func (r *Record) GetType() string { return r.Type } +func (r *Record) GetId() string { return r.RecordId } +func (r *Record) GetDNSName() string { return GetDNSName(alidns.Record(*r)) } +func (r *Record) GetSetIdentifier() string { return "" } func (r *Record) GetValue() string { if r.Type == dns.RS_TXT { return raw.EnsureQuotedText(r.Value) diff --git a/pkg/controller/provider/aws/aliastarget.go b/pkg/controller/provider/aws/aliastarget.go index 9ebc49d21..862afb1c1 100644 --- a/pkg/controller/provider/aws/aliastarget.go +++ b/pkg/controller/provider/aws/aliastarget.go @@ -17,10 +17,12 @@ package aws import ( + "fmt" + "strings" + "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/service/route53" "github.com/gardener/external-dns-management/pkg/dns" - "strings" ) var ( @@ -92,14 +94,15 @@ func buildRecordSetFromAliasTarget(r *route53.ResourceRecordSet) *dns.RecordSet rs := dns.NewRecordSet(dns.RS_ALIAS, 0, nil) rs.IgnoreTTL = true // alias target has no settable TTL rs.Add(&dns.Record{Value: dns.NormalizeHostname(aws.StringValue(r.AliasTarget.DNSName))}) + rs.RoutingPolicy = extractRoutingPolicy(r) return rs } -func buildResourceRecordSetForAliasTarget(name string, rset *dns.RecordSet) *route53.ResourceRecordSet { +func buildResourceRecordSetForAliasTarget(name dns.RecordSetName, rset *dns.RecordSet) (*route53.ResourceRecordSet, error) { target := dns.NormalizeHostname(rset.Records[0].Value) hostedZone := canonicalHostedZone(target) if hostedZone == "" { - return nil + return nil, fmt.Errorf("Corrupted alias record set") } aliasTarget := &route53.AliasTarget{ DNSName: aws.String(target), @@ -107,11 +110,15 @@ func buildResourceRecordSetForAliasTarget(name string, rset *dns.RecordSet) *rou EvaluateTargetHealth: aws.Bool(true), } - return &route53.ResourceRecordSet{ - Name: aws.String(name), + rrset := &route53.ResourceRecordSet{ + Name: aws.String(name.DNSName), Type: aws.String(route53.RRTypeA), AliasTarget: aliasTarget, } + if err := addRoutingPolicy(rrset, name, rset.RoutingPolicy); err != nil { + return nil, err + } + return rrset, nil } // canonicalHostedZone returns the matching canonical zone for a given hostname. 
diff --git a/pkg/controller/provider/aws/execution.go b/pkg/controller/provider/aws/execution.go index 1eb8ab8f2..5e066a59d 100644 --- a/pkg/controller/provider/aws/execution.go +++ b/pkg/controller/provider/aws/execution.go @@ -44,7 +44,7 @@ type Execution struct { rateLimiter flowcontrol.RateLimiter zone provider.DNSHostedZone - changes map[string][]*Change + changes map[dns.RecordSetName][]*Change batchSize int } @@ -54,14 +54,14 @@ func NewExecution(logger logger.LogContext, h *Handler, zone provider.DNSHostedZ r53: h.r53, rateLimiter: h.config.RateLimiter, zone: zone, - changes: map[string][]*Change{}, + changes: map[dns.RecordSetName][]*Change{}, batchSize: h.awsConfig.BatchSize, } } -func buildResourceRecordSet(name string, rset *dns.RecordSet) *route53.ResourceRecordSet { +func buildResourceRecordSet(name dns.RecordSetName, rset *dns.RecordSet) (*route53.ResourceRecordSet, error) { rrs := &route53.ResourceRecordSet{} - rrs.Name = aws.String(name) + rrs.Name = aws.String(name.DNSName) rrs.Type = aws.String(rset.Type) rrs.TTL = aws.Int64(rset.TTL) rrs.ResourceRecords = make([]*route53.ResourceRecord, len(rset.Records)) @@ -70,33 +70,38 @@ func buildResourceRecordSet(name string, rset *dns.RecordSet) *route53.ResourceR Value: aws.String(r.Value), } } - return rrs + if err := addRoutingPolicy(rrs, name, rset.RoutingPolicy); err != nil { + return nil, err + } + return rrs, nil } -func (this *Execution) addChange(action string, req *provider.ChangeRequest, dnsset *dns.DNSSet) { - name, rset := dns.MapToProvider(req.Type, dnsset, this.zone.Domain()) - name = dns.AlignHostname(name) +func (this *Execution) addChange(action string, req *provider.ChangeRequest, dnsset *dns.DNSSet) error { + name, rset := dns.MapToProviderEx(req.Type, dnsset, this.zone.Domain(), req.RoutingPolicy) + name = name.Align() if len(rset.Records) == 0 { - return + return nil } this.Infof("%s %s record set %s[%s]: %s(%d)", action, rset.Type, name, this.zone.Id(), rset.RecordString(), rset.TTL) + var err error var rrs *route53.ResourceRecordSet if rset.Type == dns.RS_ALIAS { - rrs = buildResourceRecordSetForAliasTarget(name, rset) - if rrs == nil { - this.Errorf("Corrupted alias record set %s[%s]", name, this.zone.Id()) - return - } + rrs, err = buildResourceRecordSetForAliasTarget(name, rset) } else { - rrs = buildResourceRecordSet(name, rset) + rrs, err = buildResourceRecordSet(name, rset) + } + if err != nil { + this.Errorf("addChange failed for %s[%s]: %s", name, this.zone.Id(), err) + return err } change := &route53.Change{Action: aws.String(action), ResourceRecordSet: rrs} this.addRawChange(name, dnsset.UpdateGroup, change, req.Done) + return nil } -func (this *Execution) addRawChange(name, updateGroup string, change *route53.Change, done provider.DoneHandler) { +func (this *Execution) addRawChange(name dns.RecordSetName, updateGroup string, change *route53.Change, done provider.DoneHandler) { this.changes[name] = append(this.changes[name], &Change{Change: change, Done: done, UpdateGroup: updateGroup}) } @@ -253,7 +258,7 @@ func safeCompareInt64(a, b *int64) bool { return *a == *b } -func limitChangeSet(changesByName map[string][]*Change, max int) [][]*Change { +func limitChangeSet(changesByName map[dns.RecordSetName][]*Change, max int) [][]*Change { batches := [][]*Change{} updateChanges := map[string][]*Change{} diff --git a/pkg/controller/provider/aws/handler.go b/pkg/controller/provider/aws/handler.go index a48d65fb8..572ae575c 100644 --- a/pkg/controller/provider/aws/handler.go +++ 
b/pkg/controller/provider/aws/handler.go @@ -33,6 +33,8 @@ import ( dnsutils "github.com/gardener/external-dns-management/pkg/dns/utils" ) +const () + type Handler struct { provider.DefaultDNSHandler config provider.DNSHandlerConfig @@ -194,6 +196,7 @@ func (h *Handler) getZones(cache provider.ZoneCache) (provider.DNSHostedZones, e func buildRecordSet(r *route53.ResourceRecordSet) *dns.RecordSet { rs := dns.NewRecordSet(aws.StringValue(r.Type), aws.Int64Value(r.TTL), nil) + rs.RoutingPolicy = extractRoutingPolicy(r) for _, rr := range r.ResourceRecords { rs.Add(&dns.Record{Value: aws.StringValue(rr.Value)}) } @@ -215,7 +218,7 @@ func (h *Handler) getZoneState(zone provider.DNSHostedZone, cache provider.ZoneC } else { rs = buildRecordSet(r) } - dnssets.AddRecordSetFromProvider(aws.StringValue(r.Name), rs) + dnssets.AddRecordSetFromProviderEx(dns.RecordSetName{DNSName: aws.StringValue(r.Name), SetIdentifier: aws.StringValue(r.SetIdentifier)}, rs) } } forwarded, err := h.handleRecordSets(zone, aggr) @@ -272,13 +275,19 @@ func (h *Handler) executeRequests(logger logger.LogContext, zone provider.DNSHos exec := NewExecution(logger, h, zone) for _, r := range reqs { + var err error switch r.Action { case provider.R_CREATE: - exec.addChange(route53.ChangeActionCreate, r, r.Addition) + err = exec.addChange(route53.ChangeActionCreate, r, r.Addition) case provider.R_UPDATE: - exec.addChange(route53.ChangeActionUpsert, r, r.Addition) + err = exec.addChange(route53.ChangeActionUpsert, r, r.Addition) case provider.R_DELETE: - exec.addChange(route53.ChangeActionDelete, r, r.Deletion) + err = exec.addChange(route53.ChangeActionDelete, r, r.Deletion) + } + if err != nil { + if r.Done != nil { + r.Done.SetInvalid(err) + } } } if h.config.DryRun { @@ -379,13 +388,17 @@ func (h *Handler) DeleteVPCAssociationAuthorization(hostedZoneId string, vpcId s return out, nil } -func (h *Handler) GetRecordSet(zone provider.DNSHostedZone, dnsName, recordType string) (provider.DedicatedRecordSet, error) { - name := dns.AlignHostname(dnsName) +func (h *Handler) GetRecordSet(zone provider.DNSHostedZone, rsName dns.RecordSetName, recordType string) (provider.DedicatedRecordSet, error) { + name := rsName.Align() + var recordIdentifier *string + if rsName.SetIdentifier != "" { + recordIdentifier = &rsName.SetIdentifier + } sets, err := h.r53.ListResourceRecordSets(&route53.ListResourceRecordSetsInput{ HostedZoneId: aws.String(zone.Id().ID), MaxItems: aws.String("1"), - StartRecordIdentifier: nil, - StartRecordName: &name, + StartRecordIdentifier: recordIdentifier, + StartRecordName: &name.DNSName, StartRecordType: &recordType, }) if err != nil { @@ -401,16 +414,17 @@ func (h *Handler) GetRecordSet(zone provider.DNSHostedZone, dnsName, recordType } else { rs = buildRecordSet(r) } - dnssets.AddRecordSetFromProvider(aws.StringValue(r.Name), rs) + rsName := dns.RecordSetName{DNSName: aws.StringValue(r.Name), SetIdentifier: aws.StringValue(r.SetIdentifier)} + dnssets.AddRecordSetFromProviderEx(rsName, rs) } } for _, r := range sets.ResourceRecordSets { - if aws.StringValue(r.Name) == name && aws.StringValue(r.Type) == recordType { + if aws.StringValue(r.Name) == name.DNSName && aws.StringValue(r.SetIdentifier) == name.SetIdentifier && aws.StringValue(r.Type) == recordType { aggr(r) } } - if set := dnssets[dnsName]; set != nil { - return provider.FromDedicatedRecordSet(dnsName, set.Sets[recordType]), nil + if set := dnssets[rsName]; set != nil { + return provider.FromDedicatedRecordSet(rsName, set.Sets[recordType]), nil } return 
nil, nil } @@ -428,6 +442,8 @@ func (h *Handler) executeRecordSetChange(action string, logger logger.LogContext dnsName, rs := provider.ToDedicatedRecordset(rawrs) dnsset := dns.NewDNSSet(dnsName) dnsset.Sets[rs.Type] = rs - exec.addChange(action, &provider.ChangeRequest{Type: rs.Type}, dnsset) + if err := exec.addChange(action, &provider.ChangeRequest{Type: rs.Type}, dnsset); err != nil { + return err + } return exec.submitChanges(h.config.Metrics) } diff --git a/pkg/controller/provider/aws/routingpolicy.go b/pkg/controller/provider/aws/routingpolicy.go new file mode 100644 index 000000000..cd4a056a3 --- /dev/null +++ b/pkg/controller/provider/aws/routingpolicy.go @@ -0,0 +1,76 @@ +/* + * Copyright 2019 SAP SE or an SAP affiliate company. All rights reserved. This file is licensed under the Apache Software License, v. 2 except as noted otherwise in the LICENSE file + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * + */ + +package aws + +import ( + "fmt" + "strconv" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/service/route53" + "github.com/gardener/external-dns-management/pkg/dns" +) + +func addRoutingPolicy(rrset *route53.ResourceRecordSet, name dns.RecordSetName, routingPolicy *dns.RoutingPolicy) error { + if name.SetIdentifier == "" && routingPolicy == nil { + return nil + } + if name.SetIdentifier == "" { + return fmt.Errorf("routing policy set, but missing set identifier") + } + if routingPolicy == nil { + return fmt.Errorf("set identifier set, but routing policy missing") + } + + var keys []string + switch routingPolicy.Type { + case dns.RoutingPolicyWeighted: + keys = []string{"weight"} + default: + return fmt.Errorf("unsupported routing policy type %s", routingPolicy.Type) + } + + if err := routingPolicy.CheckParameterKeys(keys); err != nil { + return err + } + + rrset.SetIdentifier = aws.String(name.SetIdentifier) + for key, value := range routingPolicy.Parameters { + switch key { + case "weight": + v, err := strconv.ParseInt(value, 0, 64) + if err != nil || v < 0 { + return fmt.Errorf("invalid value for spec.routingPolicy.parameters.weight: %s", value) + } + rrset.Weight = aws.Int64(v) + } + } + + return nil +} + +func extractRoutingPolicy(rrset *route53.ResourceRecordSet) *dns.RoutingPolicy { + if rrset.SetIdentifier == nil { + return nil + } + + if rrset.Weight != nil { + return dns.NewRoutingPolicy(dns.RoutingPolicyWeighted, "weight", strconv.FormatInt(*rrset.Weight, 10)) + } + // ignore unsupported routing policy + return nil +} diff --git a/pkg/controller/provider/azure-private/execution.go b/pkg/controller/provider/azure-private/execution.go index c214ba6ab..b3a21454f 100644 --- a/pkg/controller/provider/azure-private/execution.go +++ b/pkg/controller/provider/azure-private/execution.go @@ -48,11 +48,12 @@ func NewExecution(logger logger.LogContext, h *Handler, resourceGroup string, zo type buildStatus int const ( - bs_ok buildStatus = 0 - bs_invalidType buildStatus = 1 - bs_empty buildStatus = 2 - bs_dryrun buildStatus = 3 - bs_invalidName buildStatus = 4 + bs_ok buildStatus = 
0 + bs_invalidType buildStatus = 1 + bs_empty buildStatus = 2 + bs_dryrun buildStatus = 3 + bs_invalidName buildStatus = 4 + bs_invalidRoutingPolicy buildStatus = 5 ) func (exec *Execution) buildRecordSet(req *provider.ChangeRequest) (buildStatus, azure.RecordType, *azure.RecordSet) { @@ -74,6 +75,10 @@ func (exec *Execution) buildRecordSet(req *provider.ChangeRequest) (buildStatus, return bs_empty, "", nil } + if req.RoutingPolicy != nil { + return bs_invalidRoutingPolicy, "", nil + } + exec.Infof("Desired %s: %s record set %s[%s] with TTL %d: %s", req.Action, rset.Type, name, exec.zoneName, rset.TTL, rset.RecordString()) return exec.buildMappedRecordSet(name, rset) } diff --git a/pkg/controller/provider/azure-private/handler.go b/pkg/controller/provider/azure-private/handler.go index 53ddd43c7..57282334d 100644 --- a/pkg/controller/provider/azure-private/handler.go +++ b/pkg/controller/provider/azure-private/handler.go @@ -195,20 +195,29 @@ func (h *Handler) executeRequests(logger logger.LogContext, zone provider.DNSHos var succeeded, failed int for _, r := range reqs { status, recordType, rset := exec.buildRecordSet(r) - if status == bs_empty || status == bs_dryrun { + switch status { + case bs_empty: continue - } else if status == bs_invalidType { + case bs_dryrun: + continue + case bs_invalidType: err := fmt.Errorf("Unexpected record type: %s", r.Type) if r.Done != nil { r.Done.SetInvalid(err) } continue - } else if status == bs_invalidName { + case bs_invalidName: err := fmt.Errorf("Unexpected dns name: %s", *rset.Name) if r.Done != nil { r.Done.SetInvalid(err) } continue + case bs_invalidRoutingPolicy: + err := fmt.Errorf("Routing policies not supported for " + TYPE_CODE) + if r.Done != nil { + r.Done.SetInvalid(err) + } + continue } err := exec.apply(r.Action, recordType, rset, h.config.Metrics) diff --git a/pkg/controller/provider/azure/execution.go b/pkg/controller/provider/azure/execution.go index 91df134f1..0f682585e 100644 --- a/pkg/controller/provider/azure/execution.go +++ b/pkg/controller/provider/azure/execution.go @@ -48,11 +48,12 @@ func NewExecution(logger logger.LogContext, h *Handler, resourceGroup string, zo type buildStatus int const ( - bs_ok buildStatus = 0 - bs_invalidType buildStatus = 1 - bs_empty buildStatus = 2 - bs_dryrun buildStatus = 3 - bs_invalidName buildStatus = 4 + bs_ok buildStatus = 0 + bs_invalidType buildStatus = 1 + bs_empty buildStatus = 2 + bs_dryrun buildStatus = 3 + bs_invalidName buildStatus = 4 + bs_invalidRoutingPolicy buildStatus = 5 ) func (exec *Execution) buildRecordSet(req *provider.ChangeRequest) (buildStatus, azure.RecordType, *azure.RecordSet) { @@ -74,6 +75,10 @@ func (exec *Execution) buildRecordSet(req *provider.ChangeRequest) (buildStatus, return bs_empty, "", nil } + if req.RoutingPolicy != nil { + return bs_invalidRoutingPolicy, "", nil + } + exec.Infof("Desired %s: %s record set %s[%s] with TTL %d: %s", req.Action, rset.Type, name, exec.zoneName, rset.TTL, rset.RecordString()) return exec.buildMappedRecordSet(name, rset) } diff --git a/pkg/controller/provider/azure/handler.go b/pkg/controller/provider/azure/handler.go index edf3ebb90..0bad83a55 100644 --- a/pkg/controller/provider/azure/handler.go +++ b/pkg/controller/provider/azure/handler.go @@ -220,20 +220,29 @@ func (h *Handler) executeRequests(logger logger.LogContext, zone provider.DNSHos var succeeded, failed int for _, r := range reqs { status, recordType, rset := exec.buildRecordSet(r) - if status == bs_empty || status == bs_dryrun { + switch status { + case 
bs_empty: continue - } else if status == bs_invalidType { + case bs_dryrun: + continue + case bs_invalidType: err := fmt.Errorf("Unexpected record type: %s", r.Type) if r.Done != nil { r.Done.SetInvalid(err) } continue - } else if status == bs_invalidName { + case bs_invalidName: err := fmt.Errorf("Unexpected dns name: %s", *rset.Name) if r.Done != nil { r.Done.SetInvalid(err) } continue + case bs_invalidRoutingPolicy: + err := fmt.Errorf("Routing policies not supported for " + TYPE_CODE) + if r.Done != nil { + r.Done.SetInvalid(err) + } + continue } err := exec.apply(r.Action, recordType, rset, h.config.Metrics) diff --git a/pkg/controller/provider/cloudflare/state.go b/pkg/controller/provider/cloudflare/state.go index a9c17513d..24b3dfca4 100644 --- a/pkg/controller/provider/cloudflare/state.go +++ b/pkg/controller/provider/cloudflare/state.go @@ -25,9 +25,12 @@ import ( type Record cloudflare.DNSRecord -func (r *Record) GetType() string { return r.Type } -func (r *Record) GetId() string { return r.ID } -func (r *Record) GetDNSName() string { return r.Name } +var _ raw.Record = &Record{} + +func (r *Record) GetType() string { return r.Type } +func (r *Record) GetId() string { return r.ID } +func (r *Record) GetDNSName() string { return r.Name } +func (r *Record) GetSetIdentifier() string { return "" } func (r *Record) GetValue() string { if r.Type == dns.RS_TXT { return raw.EnsureQuotedText(r.Content) diff --git a/pkg/controller/provider/google/execution.go b/pkg/controller/provider/google/execution.go index b1885f53a..62327c356 100644 --- a/pkg/controller/provider/google/execution.go +++ b/pkg/controller/provider/google/execution.go @@ -17,6 +17,8 @@ package google import ( + "fmt" + "github.com/gardener/controller-manager-library/pkg/logger" "github.com/gardener/controller-manager-library/pkg/utils" googledns "google.golang.org/api/dns/v1" @@ -65,6 +67,13 @@ func (this *Execution) addChange(req *provider.ChangeRequest) { if name == "" || (newset.Length() == 0 && oldset.Length() == 0) { return } + if req.RoutingPolicy != nil { + err := fmt.Errorf("Routing policies unsupported for " + TYPE_CODE) + if req.Done != nil { + req.Done.SetInvalid(err) + } + return + } name = dns.AlignHostname(name) switch req.Action { case provider.R_CREATE: diff --git a/pkg/controller/provider/infoblox/state.go b/pkg/controller/provider/infoblox/state.go index 1349ac22f..ebedb830d 100644 --- a/pkg/controller/provider/infoblox/state.go +++ b/pkg/controller/provider/infoblox/state.go @@ -31,13 +31,14 @@ type Record interface { type RecordA ibclient.RecordA -func (r *RecordA) GetType() string { return dns.RS_A } -func (r *RecordA) GetId() string { return r.Ref } -func (r *RecordA) GetDNSName() string { return r.Name } -func (r *RecordA) GetValue() string { return r.Ipv4Addr } -func (r *RecordA) GetTTL() int { return int(r.Ttl) } -func (r *RecordA) SetTTL(ttl int) { r.Ttl = uint32(ttl); r.UseTtl = ttl != 0 } -func (r *RecordA) Copy() raw.Record { n := *r; return &n } +func (r *RecordA) GetType() string { return dns.RS_A } +func (r *RecordA) GetId() string { return r.Ref } +func (r *RecordA) GetDNSName() string { return r.Name } +func (r *RecordA) GetSetIdentifier() string { return "" } +func (r *RecordA) GetValue() string { return r.Ipv4Addr } +func (r *RecordA) GetTTL() int { return int(r.Ttl) } +func (r *RecordA) SetTTL(ttl int) { r.Ttl = uint32(ttl); r.UseTtl = ttl != 0 } +func (r *RecordA) Copy() raw.Record { n := *r; return &n } func (r *RecordA) PrepareUpdate() raw.Record { n := *r n.Zone = "" @@ -48,13 
+49,14 @@ func (r *RecordA) PrepareUpdate() raw.Record { type RecordAAAA ibclient.RecordAAAA -func (r *RecordAAAA) GetType() string { return dns.RS_A } -func (r *RecordAAAA) GetId() string { return r.Ref } -func (r *RecordAAAA) GetDNSName() string { return r.Name } -func (r *RecordAAAA) GetValue() string { return r.Ipv6Addr } -func (r *RecordAAAA) GetTTL() int { return int(r.Ttl) } -func (r *RecordAAAA) SetTTL(ttl int) { r.Ttl = uint32(ttl); r.UseTtl = ttl != 0 } -func (r *RecordAAAA) Copy() raw.Record { n := *r; return &n } +func (r *RecordAAAA) GetType() string { return dns.RS_A } +func (r *RecordAAAA) GetId() string { return r.Ref } +func (r *RecordAAAA) GetDNSName() string { return r.Name } +func (r *RecordAAAA) GetSetIdentifier() string { return "" } +func (r *RecordAAAA) GetValue() string { return r.Ipv6Addr } +func (r *RecordAAAA) GetTTL() int { return int(r.Ttl) } +func (r *RecordAAAA) SetTTL(ttl int) { r.Ttl = uint32(ttl); r.UseTtl = ttl != 0 } +func (r *RecordAAAA) Copy() raw.Record { n := *r; return &n } func (r *RecordAAAA) PrepareUpdate() raw.Record { n := *r n.Zone = "" @@ -68,6 +70,7 @@ type RecordCNAME ibclient.RecordCNAME func (r *RecordCNAME) GetType() string { return dns.RS_CNAME } func (r *RecordCNAME) GetId() string { return r.Ref } func (r *RecordCNAME) GetDNSName() string { return r.Name } +func (r *RecordCNAME) GetSetIdentifier() string { return "" } func (r *RecordCNAME) GetValue() string { return r.Canonical } func (r *RecordCNAME) GetTTL() int { return int(r.Ttl) } func (r *RecordCNAME) SetTTL(ttl int) { r.Ttl = uint32(ttl); r.UseTtl = ttl != 0 } @@ -79,6 +82,7 @@ type RecordTXT ibclient.RecordTXT func (r *RecordTXT) GetType() string { return dns.RS_TXT } func (r *RecordTXT) GetId() string { return r.Ref } func (r *RecordTXT) GetDNSName() string { return r.Name } +func (r *RecordTXT) GetSetIdentifier() string { return "" } func (r *RecordTXT) GetValue() string { return raw.EnsureQuotedText(r.Text) } func (r *RecordTXT) GetTTL() int { return int(r.Ttl) } func (r *RecordTXT) SetTTL(ttl int) { r.Ttl = uint(ttl); r.UseTtl = ttl != 0 } @@ -86,6 +90,7 @@ func (r *RecordTXT) Copy() raw.Record { n := *r; return &n } func (r *RecordTXT) PrepareUpdate() raw.Record { n := *r; n.Zone = ""; n.View = ""; return &n } var _ raw.Record = (*RecordA)(nil) +var _ raw.Record = (*RecordAAAA)(nil) var _ raw.Record = (*RecordCNAME)(nil) var _ raw.Record = (*RecordTXT)(nil) diff --git a/pkg/controller/provider/netlify/state.go b/pkg/controller/provider/netlify/state.go index 2cfb9b22e..ee97b7245 100644 --- a/pkg/controller/provider/netlify/state.go +++ b/pkg/controller/provider/netlify/state.go @@ -25,9 +25,12 @@ import ( type Record models.DNSRecord -func (r *Record) GetType() string { return r.Type } -func (r *Record) GetId() string { return r.ID } -func (r *Record) GetDNSName() string { return r.Hostname } +var _ raw.Record = &Record{} + +func (r *Record) GetType() string { return r.Type } +func (r *Record) GetId() string { return r.ID } +func (r *Record) GetDNSName() string { return r.Hostname } +func (r *Record) GetSetIdentifier() string { return "" } func (r *Record) GetValue() string { if r.Type == dns.RS_TXT { return raw.EnsureQuotedText(r.Value) diff --git a/pkg/controller/provider/openstack/execution.go b/pkg/controller/provider/openstack/execution.go index c7445c7b4..cc4b197d5 100644 --- a/pkg/controller/provider/openstack/execution.go +++ b/pkg/controller/provider/openstack/execution.go @@ -46,9 +46,10 @@ func NewExecution(logger logger.LogContext, h *Handler, zone 
provider.DNSHostedZ type buildStatus int const ( - bsOk buildStatus = 0 - bsEmpty buildStatus = 2 - bsDryRun buildStatus = 3 + bsOk buildStatus = 0 + bsEmpty buildStatus = 2 + bsDryRun buildStatus = 3 + bsInvalidRoutingPolicy buildStatus = 4 ) func (exec *Execution) buildRecordSet(req *provider.ChangeRequest) (buildStatus, *recordsets.RecordSet) { @@ -66,6 +67,10 @@ func (exec *Execution) buildRecordSet(req *provider.ChangeRequest) (buildStatus, return bsEmpty, nil } + if req.RoutingPolicy != nil { + return bsInvalidRoutingPolicy, nil + } + exec.Infof("Desired %s: %s record set %s[%s]: %s", req.Action, rset.Type, name, exec.zone.Domain(), rset.RecordString()) return exec.buildMappedRecordSet(name, rset) } diff --git a/pkg/controller/provider/openstack/handler.go b/pkg/controller/provider/openstack/handler.go index f6e662c49..52cff03a6 100644 --- a/pkg/controller/provider/openstack/handler.go +++ b/pkg/controller/provider/openstack/handler.go @@ -249,6 +249,13 @@ func (h *Handler) executeRequests(logger logger.LogContext, zone provider.DNSHos if status == bsEmpty || status == bsDryRun { continue } + if status == bsInvalidRoutingPolicy { + err := fmt.Errorf("Routing policies unsupported for " + TYPE_CODE) + if r.Done != nil { + r.Done.SetInvalid(err) + } + continue + } err := exec.apply(r.Action, rset) if err != nil { diff --git a/pkg/controller/provider/openstack/handler_test.go b/pkg/controller/provider/openstack/handler_test.go index 267167772..4f89fa93b 100644 --- a/pkg/controller/provider/openstack/handler_test.go +++ b/pkg/controller/provider/openstack/handler_test.go @@ -323,23 +323,26 @@ func TestGetZoneStateAndExecuteRequests(t *testing.T) { } stdMeta := buildRecordSet("META", 600, "\"owner=test\"", "\"prefix=comment-\"") + sub1 := dns.RecordSetName{DNSName: "sub1.z1.test"} + sub2 := dns.RecordSetName{DNSName: "sub2.z1.test"} + sub3 := dns.RecordSetName{DNSName: "sub3.z1.test"} expectedDnssets := dns.DNSSets{ - "sub1.z1.test": &dns.DNSSet{ - Name: "sub1.z1.test", + sub1: &dns.DNSSet{ + Name: sub1, Sets: dns.RecordSets{ "A": buildRecordSet("A", 301, "1.2.3.4", "5.6.7.8"), "META": stdMeta, }, }, - "sub2.z1.test": &dns.DNSSet{ - Name: "sub2.z1.test", + sub2: &dns.DNSSet{ + Name: sub2, Sets: dns.RecordSets{ "CNAME": buildRecordSet("CNAME", 302, "cname.target.test"), "META": stdMeta, }, }, - "sub3.z1.test": &dns.DNSSet{ - Name: "sub3.z1.test", + dns.RecordSetName{DNSName: "sub3.z1.test"}: &dns.DNSSet{ + Name: sub3, Sets: dns.RecordSets{ "TXT": buildRecordSet("TXT", 303, "foo", "bar"), }, @@ -352,12 +355,13 @@ func TestGetZoneStateAndExecuteRequests(t *testing.T) { Ω(actualDnssets).Should(Equal(expectedDnssets)) tlog := logger.New() + sub4 := dns.RecordSetName{DNSName: "sub4.z1.test"} reqs := []*provider.ChangeRequest{ { Action: provider.R_CREATE, Type: "A", Addition: &dns.DNSSet{ - Name: "sub4.z1.test", + Name: sub4, Sets: dns.RecordSets{ "A": buildRecordSet("A", 304, "11.22.33.44"), }, @@ -367,7 +371,7 @@ func TestGetZoneStateAndExecuteRequests(t *testing.T) { Action: provider.R_CREATE, Type: "META", Addition: &dns.DNSSet{ - Name: "sub4.z1.test", + Name: sub4, Sets: dns.RecordSets{ "META": stdMeta, }, @@ -377,7 +381,7 @@ func TestGetZoneStateAndExecuteRequests(t *testing.T) { Action: provider.R_UPDATE, Type: "A", Addition: &dns.DNSSet{ - Name: "sub1.z1.test", + Name: sub1, Sets: dns.RecordSets{ "A": buildRecordSet("A", 305, "1.2.3.55", "5.6.7.8"), }, @@ -386,32 +390,32 @@ func TestGetZoneStateAndExecuteRequests(t *testing.T) { { Action: provider.R_DELETE, Type: "CNAME", - Deletion: 
expectedDnssets["sub2.z1.test"], + Deletion: expectedDnssets[sub2], }, { Action: provider.R_DELETE, Type: "META", - Deletion: expectedDnssets["sub2.z1.test"], + Deletion: expectedDnssets[sub2], }, { Action: provider.R_DELETE, Type: "TXT", - Deletion: expectedDnssets["sub3.z1.test"], + Deletion: expectedDnssets[sub3], }, } err = h.ExecuteRequests(tlog, hostedZone, zoneState2, reqs) Ω(err).Should(BeNil(), "ExecuteRequests failed") expectedDnssets2 := dns.DNSSets{ - "sub1.z1.test": &dns.DNSSet{ - Name: "sub1.z1.test", + sub1: &dns.DNSSet{ + Name: sub1, Sets: dns.RecordSets{ "A": buildRecordSet("A", 305, "1.2.3.55", "5.6.7.8"), "META": stdMeta, }, }, - "sub4.z1.test": &dns.DNSSet{ - Name: "sub4.z1.test", + sub4: &dns.DNSSet{ + Name: sub4, Sets: dns.RecordSets{ "A": buildRecordSet("A", 304, "11.22.33.44"), "META": stdMeta, @@ -425,7 +429,7 @@ func TestGetZoneStateAndExecuteRequests(t *testing.T) { return } actualDnssets2 := zoneState3.GetDNSSets() - Ω(actualDnssets2["sub1.z1.test"]).Should(Equal(expectedDnssets2["sub1.z1.test"])) - Ω(actualDnssets2["sub4.z1.test"]).Should(Equal(expectedDnssets2["sub4.z1.test"])) + Ω(actualDnssets2[sub1]).Should(Equal(expectedDnssets2[sub1])) + Ω(actualDnssets2[sub4]).Should(Equal(expectedDnssets2[sub4])) Ω(actualDnssets2).Should(Equal(expectedDnssets2)) } diff --git a/pkg/dns/dnsset.go b/pkg/dns/dnsset.go index a3287d04f..d921afc8e 100644 --- a/pkg/dns/dnsset.go +++ b/pkg/dns/dnsset.go @@ -17,6 +17,8 @@ package dns import ( + "fmt" + "github.com/gardener/controller-manager-library/pkg/utils" api "github.com/gardener/external-dns-management/pkg/apis/dns/v1alpha1" @@ -53,21 +55,25 @@ import ( // or writing a record set, respectively. The map the given set to // an effective set and dns name for the desired purpose. -type DNSSets map[string]*DNSSet +type DNSSets map[RecordSetName]*DNSSet type Ownership interface { IsResponsibleFor(id string) bool GetIds() utils.StringSet } -func (dnssets DNSSets) AddRecordSetFromProvider(dnsname string, rs *RecordSet) { - name := NormalizeHostname(dnsname) +func (dnssets DNSSets) AddRecordSetFromProvider(dnsName string, rs *RecordSet) { + dnssets.AddRecordSetFromProviderEx(RecordSetName{DNSName: dnsName}, rs) +} + +func (dnssets DNSSets) AddRecordSetFromProviderEx(rsName RecordSetName, rs *RecordSet) { + name := rsName.Normalize() name, rs = MapFromProvider(name, rs) dnssets.AddRecordSet(name, rs) } -func (dnssets DNSSets) AddRecordSet(name string, rs *RecordSet) { +func (dnssets DNSSets) AddRecordSet(name RecordSetName, rs *RecordSet) { dnsset := dnssets[name] if dnsset == nil { dnsset = NewDNSSet(name) @@ -76,7 +82,7 @@ func (dnssets DNSSets) AddRecordSet(name string, rs *RecordSet) { dnsset.Sets[rs.Type] = rs } -func (dnssets DNSSets) RemoveRecordSet(name string, recordSetType string) { +func (dnssets DNSSets) RemoveRecordSet(name RecordSetName, recordSetType string) { dnsset := dnssets[name] if dnsset != nil { delete(dnsset.Sets, recordSetType) @@ -116,8 +122,82 @@ const ( ATTR_LOCKID = "lockid" ) +type RecordSetName struct { + // domain name of the record + DNSName string + // optional set identifier (used for record with routing policy) + SetIdentifier string +} + +func (n RecordSetName) WithDNSName(dnsName string) RecordSetName { + return RecordSetName{DNSName: dnsName, SetIdentifier: n.SetIdentifier} +} + +func (n RecordSetName) String() string { + if n.SetIdentifier == "" { + return n.DNSName + } + return n.DNSName + "#" + n.SetIdentifier +} + +func (n RecordSetName) Align() RecordSetName { + return 
n.WithDNSName(AlignHostname(n.DNSName)) +} + +func (n RecordSetName) Normalize() RecordSetName { + return n.WithDNSName(NormalizeHostname(n.DNSName)) +} + +const ( + RoutingPolicyWeighted = "weighted" +) + +type RoutingPolicy struct { + Type string + Parameters map[string]string +} + +func NewRoutingPolicy(typ string, keyvalues ...string) *RoutingPolicy { + policy := &RoutingPolicy{Type: typ, Parameters: map[string]string{}} + for i := 0; i < len(keyvalues)-1; i += 2 { + policy.Parameters[keyvalues[i]] = keyvalues[i+1] + } + return policy +} + +func (p *RoutingPolicy) Clone() *RoutingPolicy { + if p == nil { + return nil + } + copy := &RoutingPolicy{Type: p.Type, Parameters: map[string]string{}} + for k, v := range p.Parameters { + copy.Parameters[k] = v + } + return copy +} + +func (p *RoutingPolicy) CheckParameterKeys(keys []string) error { + for _, k := range keys { + if _, ok := p.Parameters[k]; !ok { + return fmt.Errorf("Missing parameter key %s", k) + } + } + if len(keys) != len(p.Parameters) { + outer: + for k := range p.Parameters { + for _, k2 := range keys { + if k == k2 { + continue outer + } + } + return fmt.Errorf("Unsupported parameter key %s", k) + } + } + return nil +} + type DNSSet struct { - Name string + Name RecordSetName Kind string UpdateGroup string Sets RecordSets @@ -135,10 +215,10 @@ func (this *DNSSet) getAttr(ty string, name string) string { return "" } -func (this *DNSSet) setAttr(ty string, name string, value string) { +func (this *DNSSet) setAttr(ty string, name string, value string, policy *RoutingPolicy) { rset := this.Sets[ty] if rset == nil { - rset = newAttrRecordSet(ty, name, value) + rset = newAttrRecordSet(ty, name, value, policy) this.Sets[rset.Type] = rset } else { rset.SetAttr(name, value) @@ -156,8 +236,8 @@ func (this *DNSSet) GetTxtAttr(name string) string { return this.getAttr(RS_TXT, name) } -func (this *DNSSet) SetTxtAttr(name string, value string) { - this.setAttr(RS_TXT, name, value) +func (this *DNSSet) SetTxtAttr(name string, value string, policy *RoutingPolicy) { + this.setAttr(RS_TXT, name, value, policy) } func (this *DNSSet) DeleteTxtAttr(name string) { @@ -168,8 +248,8 @@ func (this *DNSSet) GetMetaAttr(name string) string { return this.getAttr(RS_META, name) } -func (this *DNSSet) SetMetaAttr(name string, value string) { - this.setAttr(RS_META, name, value) +func (this *DNSSet) SetMetaAttr(name string, value string, policy *RoutingPolicy) { + this.setAttr(RS_META, name, value, policy) } func (this *DNSSet) DeleteMetaAttr(name string) { @@ -190,8 +270,8 @@ func (this *DNSSet) GetOwner() string { return this.GetMetaAttr(ATTR_OWNER) } -func (this *DNSSet) SetOwner(ownerid string) *DNSSet { - this.SetMetaAttr(ATTR_OWNER, ownerid) +func (this *DNSSet) SetOwner(ownerid string, policy *RoutingPolicy) *DNSSet { + this.SetMetaAttr(ATTR_OWNER, ownerid, policy) return this } @@ -209,7 +289,7 @@ func (this *DNSSet) SetKind(t string, prop ...bool) *DNSSet { this.Kind = t if t != api.DNSEntryKind { if len(prop) == 0 || prop[0] { - this.SetMetaAttr(ATTR_KIND, t) + this.SetMetaAttr(ATTR_KIND, t, nil) } } else { this.DeleteMetaAttr(ATTR_KIND) @@ -217,14 +297,14 @@ func (this *DNSSet) SetKind(t string, prop ...bool) *DNSSet { return this } -func (this *DNSSet) SetRecordSet(rtype string, ttl int64, values ...string) { +func (this *DNSSet) SetRecordSet(rtype string, ttl int64, routingPolicy *RoutingPolicy, values ...string) { records := make([]*Record, len(values)) for i, r := range values { records[i] = &Record{Value: r} } - this.Sets[rtype] = 
&RecordSet{rtype, ttl, false, records} + this.Sets[rtype] = &RecordSet{Type: rtype, TTL: ttl, IgnoreTTL: false, RoutingPolicy: routingPolicy, Records: records} } -func NewDNSSet(name string) *DNSSet { +func NewDNSSet(name RecordSetName) *DNSSet { return &DNSSet{Name: name, Sets: map[string]*RecordSet{}} } diff --git a/pkg/dns/mapping.go b/pkg/dns/mapping.go index d295d9e6e..a31c425de 100644 --- a/pkg/dns/mapping.go +++ b/pkg/dns/mapping.go @@ -44,20 +44,25 @@ func NormalizeHostname(host string) string { } func MapToProvider(rtype string, dnsset *DNSSet, base string) (string, *RecordSet) { - name := dnsset.Name + rsName, rs := MapToProviderEx(rtype, dnsset, base, nil) + return rsName.DNSName, rs +} + +func MapToProviderEx(rtype string, dnsset *DNSSet, base string, policy *RoutingPolicy) (RecordSetName, *RecordSet) { + dnsName := dnsset.Name.DNSName rs := dnsset.Sets[rtype] if rtype == RS_META { prefix := dnsset.GetMetaAttr(ATTR_PREFIX) if prefix == "" { prefix = TxtPrefix - dnsset.SetMetaAttr(ATTR_PREFIX, prefix) + dnsset.SetMetaAttr(ATTR_PREFIX, prefix, policy) } - metaName := calcMetaRecordDomainName(name, prefix, base) + metaName := calcMetaRecordDomainName(dnsName, prefix, base) new := *dnsset.Sets[rtype] new.Type = RS_TXT - return metaName, &new + return dnsset.Name.WithDNSName(metaName), &new } - return name, rs + return dnsset.Name, rs } func calcMetaRecordDomainName(name, prefix, base string) string { @@ -78,7 +83,8 @@ func CalcMetaRecordDomainNameForValidation(name string) string { return calcMetaRecordDomainName(name, TxtPrefix, "") } -func MapFromProvider(dns string, rs *RecordSet) (string, *RecordSet) { +func MapFromProvider(name RecordSetName, rs *RecordSet) (RecordSetName, *RecordSet) { + dns := name.DNSName if rs.Type == RS_TXT { prefix := rs.GetAttr(ATTR_PREFIX) if prefix != "" { @@ -97,11 +103,11 @@ func MapFromProvider(dns string, rs *RecordSet) (string, *RecordSet) { // for backwards compatibility of form *.comment-.basedomain dns = dns[1:] } - return add + dns, &new + return name.WithDNSName(add + dns), &new } else { - return add + dns, rs + return name.WithDNSName(add + dns), rs } } } - return dns, rs + return name.WithDNSName(dns), rs } diff --git a/pkg/dns/mapping_test.go b/pkg/dns/mapping_test.go index 79d204102..4ec13a3b3 100644 --- a/pkg/dns/mapping_test.go +++ b/pkg/dns/mapping_test.go @@ -76,20 +76,20 @@ func TestMapToFromProvider(t *testing.T) { wantedRecords = append(inputRecords, &Record{"\"prefix=comment-\""}) } dnsset := DNSSet{ - Name: entry.domainName, + Name: RecordSetName{DNSName: entry.domainName}, Sets: RecordSets{RS_META: &RecordSet{Type: RS_META, TTL: 600, Records: inputRecords}}, } - actualName, actualRecordSet := MapToProvider(rtype, &dnsset, base) + actualName, actualRecordSet := MapToProviderEx(rtype, &dnsset, base, nil) - Ω(actualName).Should(Equal(entry.wantedName), "Name should match") + Ω(actualName).Should(Equal(RecordSetName{DNSName: entry.wantedName}), "Name should match") Ω(actualRecordSet.Type).Should(Equal(RS_TXT), "Type mismatch") Ω(actualRecordSet.TTL).Should(Equal(int64(600)), "TTL mismatch") Ω(actualRecordSet.Records).Should(Equal(wantedRecords)) reversedName, reversedRecordSet := MapFromProvider(actualName, actualRecordSet) - Ω(reversedName).Should(Equal(entry.domainName), "Reversed name should match") + Ω(reversedName).Should(Equal(RecordSetName{DNSName: entry.domainName}), "Reversed name should match") Ω(reversedRecordSet.Type).Should(Equal(RS_META), "Reversed RecordSet.Type should match") 
Ω(reversedRecordSet.TTL).Should(Equal(int64(600)), "TTL mismatch") Ω(reversedRecordSet.Records).Should(Equal(wantedRecords)) diff --git a/pkg/dns/provider/changemodel.go b/pkg/dns/provider/changemodel.go index 592c607c0..4ffbfe9fc 100644 --- a/pkg/dns/provider/changemodel.go +++ b/pkg/dns/provider/changemodel.go @@ -44,15 +44,49 @@ const ( type ChangeRequests []*ChangeRequest type ChangeRequest struct { - Action string - Type string - Addition *dns.DNSSet - Deletion *dns.DNSSet - Done DoneHandler + Action string + Type string + Addition *dns.DNSSet + Deletion *dns.DNSSet + Done DoneHandler + Applied bool + RoutingPolicy *dns.RoutingPolicy } -func NewChangeRequest(action string, rtype string, del, add *dns.DNSSet, done DoneHandler) *ChangeRequest { - return &ChangeRequest{Action: action, Type: rtype, Addition: add, Deletion: del, Done: done} +func NewChangeRequest(action string, rtype string, del, add *dns.DNSSet, done DoneHandler, policy *dns.RoutingPolicy) *ChangeRequest { + r := &ChangeRequest{Action: action, Type: rtype, Addition: add, Deletion: del, RoutingPolicy: policy} + r.Done = &applyingDoneHandler{changeRequest: r, inner: done} + return r +} + +type applyingDoneHandler struct { + changeRequest *ChangeRequest + inner DoneHandler +} + +func (h *applyingDoneHandler) SetInvalid(err error) { + if h.inner != nil { + h.inner.SetInvalid(err) + } +} + +func (h *applyingDoneHandler) Failed(err error) { + if h.inner != nil { + h.inner.Failed(err) + } +} + +func (h *applyingDoneHandler) Throttled() { + if h.inner != nil { + h.inner.Throttled() + } +} + +func (h *applyingDoneHandler) Succeeded() { + h.changeRequest.Applied = true + if h.inner != nil { + h.inner.Succeeded() + } } type ChangeGroup struct { @@ -77,7 +111,7 @@ func (this *ChangeGroup) cleanup(logger logger.LogContext, model *ChangeModel) b if model.ExistsInEquivalentZone(s.Name) { continue } - if e := model.IsStale(ZonedDNSName{ZoneID: model.ZoneId(), DNSName: s.Name}); e != nil { + if e := model.IsStale(ZonedRecordSetName{ZoneID: model.ZoneId(), RecordSetName: s.Name}); e != nil { if e.IsDeleting() { model.failedDNSNames.Add(s.Name) // preventing deletion of stale entry } @@ -99,7 +133,7 @@ func (this *ChangeGroup) cleanup(logger logger.LogContext, model *ChangeModel) b model.Infof("found unapplied managed set '%s'", s.Name) var done DoneHandler for _, e := range model.context.entries { - if e.dnsname == s.Name { + if e.rsname == s.Name { done = NewStatusUpdate(logger, e, model.context.fhandler) break } @@ -132,17 +166,17 @@ func (this *ChangeGroup) update(logger logger.LogContext, model *ChangeModel) bo return ok } -func (this *ChangeGroup) addCreateRequest(dnsset *dns.DNSSet, rtype string, done DoneHandler) { - this.addChangeRequest(R_CREATE, nil, dnsset, rtype, done) +func (this *ChangeGroup) addCreateRequest(dnsset *dns.DNSSet, rtype string, done DoneHandler, policy *dns.RoutingPolicy) { + this.addChangeRequest(R_CREATE, nil, dnsset, rtype, done, policy) } -func (this *ChangeGroup) addUpdateRequest(old, new *dns.DNSSet, rtype string, done DoneHandler) { - this.addChangeRequest(R_UPDATE, old, new, rtype, done) +func (this *ChangeGroup) addUpdateRequest(old, new *dns.DNSSet, rtype string, done DoneHandler, policy *dns.RoutingPolicy) { + this.addChangeRequest(R_UPDATE, old, new, rtype, done, policy) } func (this *ChangeGroup) addDeleteRequest(dnsset *dns.DNSSet, rtype string, done DoneHandler) { - this.addChangeRequest(R_DELETE, dnsset, nil, rtype, done) + this.addChangeRequest(R_DELETE, dnsset, nil, rtype, done, nil) } -func 
(this *ChangeGroup) addChangeRequest(action string, old, new *dns.DNSSet, rtype string, done DoneHandler) { - r := NewChangeRequest(action, rtype, old, new, done) +func (this *ChangeGroup) addChangeRequest(action string, old, new *dns.DNSSet, rtype string, done DoneHandler, policy *dns.RoutingPolicy) { + r := NewChangeRequest(action, rtype, old, new, done, policy) this.requests = append(this.requests, r) } @@ -157,11 +191,11 @@ type ChangeModel struct { config Config ownership dns.Ownership context *zoneReconciliation - applied map[string]*dns.DNSSet + applied map[dns.RecordSetName]*dns.DNSSet dangling *ChangeGroup providergroups map[string]*ChangeGroup zonestate DNSZoneState - failedDNSNames utils.StringSet + failedDNSNames RecordSetNameSet } type ChangeResult struct { @@ -176,18 +210,18 @@ func NewChangeModel(logger logger.LogContext, ownership dns.Ownership, req *zone config: config, ownership: ownership, context: req, - applied: map[string]*dns.DNSSet{}, + applied: map[dns.RecordSetName]*dns.DNSSet{}, providergroups: map[string]*ChangeGroup{}, - failedDNSNames: utils.StringSet{}, + failedDNSNames: RecordSetNameSet{}, } } -func (this *ChangeModel) IsStale(dns ZonedDNSName) *Entry { +func (this *ChangeModel) IsStale(dns ZonedRecordSetName) *Entry { return this.context.stale[dns] } -func (this *ChangeModel) ExistsInEquivalentZone(dnsName string) bool { - return this.context.equivEntries != nil && this.context.equivEntries.Contains(dnsName) +func (this *ChangeModel) ExistsInEquivalentZone(name dns.RecordSetName) bool { + return this.context.equivEntries != nil && this.context.equivEntries.Contains(name) } func (this *ChangeModel) getProviderView(p DNSProvider) *ChangeGroup { @@ -240,17 +274,17 @@ func (this *ChangeModel) Setup() error { sets := this.zonestate.GetDNSSets() this.context.zone.SetOwners(sets.GetOwners()) this.dangling = newChangeGroup("dangling entries", provider, this) - for dnsName, set := range sets { + for rsName, set := range sets { var view *ChangeGroup - provider = this.context.providers.LookupFor(dnsName) + provider = this.context.providers.LookupFor(rsName.DNSName) if provider != nil { - this.dumpf(" %s: %d types (provider %s)", dnsName, len(set.Sets), provider.ObjectName()) + this.dumpf(" %s: %d types (provider %s)", rsName, len(set.Sets), provider.ObjectName()) view = this.getProviderView(provider) } else { - this.dumpf(" %s: %d types (no provider)", dnsName, len(set.Sets)) + this.dumpf(" %s: %d types (no provider)", rsName, len(set.Sets)) view = this.dangling } - view.dnssets[dnsName] = set + view.dnssets[rsName] = set for t, r := range set.Sets { this.dumpf(" %s: %d records: %s", t, len(r.Records), r.RecordString()) } @@ -259,20 +293,20 @@ func (this *ChangeModel) Setup() error { return err } -func (this *ChangeModel) Check(name, updateGroup string, createdAt time.Time, done DoneHandler, spec TargetSpec) ChangeResult { +func (this *ChangeModel) Check(name dns.RecordSetName, updateGroup string, createdAt time.Time, done DoneHandler, spec TargetSpec) ChangeResult { return this.Exec(false, false, name, updateGroup, createdAt, done, spec) } -func (this *ChangeModel) Apply(name, updateGroup string, createdAt time.Time, done DoneHandler, spec TargetSpec) ChangeResult { +func (this *ChangeModel) Apply(name dns.RecordSetName, updateGroup string, createdAt time.Time, done DoneHandler, spec TargetSpec) ChangeResult { return this.Exec(true, false, name, updateGroup, createdAt, done, spec) } -func (this *ChangeModel) Delete(name, updateGroup string, createdAt time.Time, done 
DoneHandler, spec TargetSpec) ChangeResult { +func (this *ChangeModel) Delete(name dns.RecordSetName, updateGroup string, createdAt time.Time, done DoneHandler, spec TargetSpec) ChangeResult { return this.Exec(true, true, name, updateGroup, createdAt, done, spec) } -func (this *ChangeModel) PseudoApply(name string) { +func (this *ChangeModel) PseudoApply(name dns.RecordSetName) { this.applied[name] = dns.NewDNSSet(name) } -func (this *ChangeModel) Exec(apply bool, delete bool, name, updateGroup string, createdAt time.Time, done DoneHandler, spec TargetSpec) ChangeResult { +func (this *ChangeModel) Exec(apply bool, delete bool, name dns.RecordSetName, updateGroup string, createdAt time.Time, done DoneHandler, spec TargetSpec) ChangeResult { //this.Infof("%s: %v", name, targets) if len(spec.Targets()) == 0 && !delete { return ChangeResult{} @@ -282,7 +316,7 @@ func (this *ChangeModel) Exec(apply bool, delete bool, name, updateGroup string, this.applied[name] = nil done = this.wrappedDoneHandler(name, done) } - p := this.context.providers.LookupFor(name) + p := this.context.providers.LookupFor(name.DNSName) if p == nil { err := fmt.Errorf("no provider found for %q", name) if done != nil { @@ -307,7 +341,7 @@ func (this *ChangeModel) Exec(apply bool, delete bool, name, updateGroup string, if oldset != nil { this.Debugf("found old for %s %q", oldset.GetKind(), oldset.Name) if this.IsForeign(oldset) { - err := &perrs.AlreadyBusyForOwner{DNSName: name, EntryCreatedAt: createdAt, Owner: oldset.GetOwner()} + err := &perrs.AlreadyBusyForOwner{Name: name, EntryCreatedAt: createdAt, Owner: oldset.GetOwner()} retry := p.ReportZoneStateConflict(this.context.zone.getZone(), err) if done != nil { if apply && !retry { @@ -331,16 +365,16 @@ func (this *ChangeModel) Exec(apply bool, delete bool, name, updateGroup string, curset := oldset.Sets[ty] if curset == nil { if apply { - view.addCreateRequest(newset, ty, done) + view.addCreateRequest(newset, ty, done, spec.RoutingPolicy()) } mod = true } else { - olddns, _ := dns.MapToProvider(ty, oldset, this.Domain()) - newdns, _ := dns.MapToProvider(ty, newset, this.Domain()) + olddns, _ := dns.MapToProviderEx(ty, oldset, this.Domain(), spec.RoutingPolicy()) + newdns, _ := dns.MapToProviderEx(ty, newset, this.Domain(), spec.RoutingPolicy()) if olddns == newdns { if !curset.Match(rset) { if apply { - view.addUpdateRequest(oldset, newset, ty, done) + view.addUpdateRequest(oldset, newset, ty, done, spec.RoutingPolicy()) } mod = true } else { @@ -350,7 +384,7 @@ func (this *ChangeModel) Exec(apply bool, delete bool, name, updateGroup string, } } else { if apply { - view.addCreateRequest(newset, ty, done) + view.addCreateRequest(newset, ty, done, spec.RoutingPolicy()) view.addDeleteRequest(oldset, ty, this.wrappedDoneHandler(name, nil)) } mod = true @@ -370,9 +404,9 @@ func (this *ChangeModel) Exec(apply bool, delete bool, name, updateGroup string, if !delete { if apply { this.Infof("no existing entry found for %s", name) - this.setOwner(newset, spec.OwnerId()) + this.setOwner(newset, spec.OwnerId(), spec.RoutingPolicy()) for ty := range newset.Sets { - view.addCreateRequest(newset, ty, done) + view.addCreateRequest(newset, ty, done, spec.RoutingPolicy()) } } mod = true @@ -411,15 +445,15 @@ func (this *ChangeModel) Update(logger logger.LogContext) error { return nil } -func (this *ChangeModel) IsFailed(dnsName string) bool { - return this.failedDNSNames.Contains(dnsName) +func (this *ChangeModel) IsFailed(name dns.RecordSetName) bool { + return 
this.failedDNSNames.Contains(name) } -func (this *ChangeModel) wrappedDoneHandler(dnsName string, done DoneHandler) DoneHandler { +func (this *ChangeModel) wrappedDoneHandler(rsName dns.RecordSetName, done DoneHandler) DoneHandler { return &changeModelDoneHandler{ changeModel: this, inner: done, - dnsName: dnsName, + rsName: rsName, } } @@ -429,7 +463,7 @@ func (this *ChangeModel) wrappedDoneHandler(dnsName string, done DoneHandler) Do type changeModelDoneHandler struct { changeModel *ChangeModel inner DoneHandler - dnsName string + rsName dns.RecordSetName } func (this *changeModelDoneHandler) SetInvalid(err error) { @@ -439,7 +473,7 @@ func (this *changeModelDoneHandler) SetInvalid(err error) { } func (this *changeModelDoneHandler) Failed(err error) { - this.changeModel.failedDNSNames.Add(this.dnsName) + this.changeModel.failedDNSNames.Add(this.rsName) if this.inner != nil { this.inner.Failed(err) } @@ -468,12 +502,12 @@ func (this *ChangeModel) IsForeign(set *dns.DNSSet) bool { return set.IsForeign(this.ownership) } -func (this *ChangeModel) setOwner(set *dns.DNSSet, id string) bool { +func (this *ChangeModel) setOwner(set *dns.DNSSet, id string, policy *dns.RoutingPolicy) bool { if id == "" { id = this.config.Ident } if id != "" { - set.SetOwner(id) + set.SetOwner(id, policy) return true } return false @@ -482,8 +516,8 @@ func (this *ChangeModel) setOwner(set *dns.DNSSet, id string) bool { func (this *ChangeModel) ApplySpec(set *dns.DNSSet, base *dns.DNSSet, provider DNSProvider, spec TargetSpec) *dns.DNSSet { set.SetKind(spec.Kind()) if base == nil || !this.IsForeign(base) { - if this.setOwner(set, spec.OwnerId()) { - set.SetMetaAttr(dns.ATTR_PREFIX, dns.TxtPrefix) + if this.setOwner(set, spec.OwnerId(), spec.RoutingPolicy()) { + set.SetMetaAttr(dns.ATTR_PREFIX, dns.TxtPrefix, spec.RoutingPolicy()) } } @@ -497,10 +531,10 @@ func (this *ChangeModel) ApplySpec(set *dns.DNSSet, base *dns.DNSSet, provider D ipv4addrs, ipv6addrs, err := lookupHosts(t.GetHostName()) if err == nil { for _, addr := range ipv4addrs { - AddRecord(targetsets, dns.RS_A, addr, ttl) + AddRecord(targetsets, dns.RS_A, addr, ttl, spec.RoutingPolicy()) } for _, addr := range ipv6addrs { - AddRecord(targetsets, dns.RS_AAAA, addr, ttl) + AddRecord(targetsets, dns.RS_AAAA, addr, ttl, spec.RoutingPolicy()) } } else { this.Errorf("cannot lookup '%s': %s", t.GetHostName(), err) @@ -509,21 +543,21 @@ func (this *ChangeModel) ApplySpec(set *dns.DNSSet, base *dns.DNSSet, provider D t.GetHostName(), strings.Join(ipv4addrs, ","), strings.Join(ipv6addrs, ",")) } else { t = provider.MapTarget(t) - AddRecord(targetsets, t.GetRecordType(), t.GetHostName(), ttl) + AddRecord(targetsets, t.GetRecordType(), t.GetHostName(), ttl, spec.RoutingPolicy()) } } set.Sets = targetsets if len(cnames) > 0 && this.Owns(set) { sort.Strings(cnames) - set.SetMetaAttr(dns.ATTR_CNAMES, strings.Join(cnames, ",")) + set.SetMetaAttr(dns.ATTR_CNAMES, strings.Join(cnames, ","), spec.RoutingPolicy()) } return set } -func AddRecord(targetsets dns.RecordSets, ty string, host string, ttl int64) { +func AddRecord(targetsets dns.RecordSets, ty string, host string, ttl int64, policy *dns.RoutingPolicy) { rs := targetsets[ty] if rs == nil { - rs = dns.NewRecordSet(ty, ttl, nil) + rs = dns.NewRecordSetEx(ty, ttl, policy, nil) targetsets[ty] = rs } rs.Records = append(rs.Records, &dns.Record{Value: host}) diff --git a/pkg/dns/provider/dedicatedrecord.go b/pkg/dns/provider/dedicatedrecord.go index ba69b26bc..28dd23ef3 100644 --- a/pkg/dns/provider/dedicatedrecord.go +++ 
b/pkg/dns/provider/dedicatedrecord.go @@ -26,7 +26,7 @@ import ( ) type DedicatedDNSAccess interface { - GetRecordSet(zone DNSHostedZone, dnsName, recordType string) (DedicatedRecordSet, error) + GetRecordSet(zone DNSHostedZone, rsName dns.RecordSetName, recordType string) (DedicatedRecordSet, error) CreateOrUpdateRecordSet(logger logger.LogContext, zone DNSHostedZone, old, new DedicatedRecordSet) error DeleteRecordSet(logger logger.LogContext, zone DNSHostedZone, rs DedicatedRecordSet) error } @@ -35,51 +35,55 @@ type DedicatedRecord interface { GetType() string GetValue() string GetDNSName() string + GetSetIdentifier() string GetTTL() int } type DedicatedRecordSet []DedicatedRecord type dedicatedRecord struct { - DNSName string - Type string - TTL int - Value string + dns.RecordSetName + Type string + TTL int + Value string } func (r *dedicatedRecord) GetType() string { return r.Type } func (r *dedicatedRecord) GetValue() string { return r.Value } +func (r *dedicatedRecord) GetSetIdentifier() string { return r.SetIdentifier } + func (r *dedicatedRecord) GetDNSName() string { return r.DNSName } func (r *dedicatedRecord) GetTTL() int { return r.TTL } -func FromDedicatedRecordSet(dnsName string, rs *dns.RecordSet) DedicatedRecordSet { +func FromDedicatedRecordSet(setName dns.RecordSetName, rs *dns.RecordSet) DedicatedRecordSet { recordset := DedicatedRecordSet{} for _, r := range rs.Records { recordset = append(recordset, &dedicatedRecord{ - DNSName: dnsName, - Type: rs.Type, - TTL: int(rs.TTL), - Value: r.Value, + RecordSetName: setName, + Type: rs.Type, + TTL: int(rs.TTL), + Value: r.Value, }) } return recordset } -func ToDedicatedRecordset(rawrs DedicatedRecordSet) (string, *dns.RecordSet) { +func ToDedicatedRecordset(rawrs DedicatedRecordSet) (dns.RecordSetName, *dns.RecordSet) { if len(rawrs) == 0 { - return "", nil + return dns.RecordSetName{}, nil } dnsName := rawrs[0].GetDNSName() + setIdentifier := rawrs[0].GetSetIdentifier() rtype := rawrs[0].GetType() ttl := int64(rawrs[0].GetTTL()) records := []*dns.Record{} for _, r := range rawrs { records = append(records, &dns.Record{Value: r.GetValue()}) } - return dnsName, dns.NewRecordSet(rtype, ttl, records) + return dns.RecordSetName{DNSName: dnsName, SetIdentifier: setIdentifier}, dns.NewRecordSet(rtype, ttl, records) } func (rs DedicatedRecordSet) GetAttr(name string) string { diff --git a/pkg/dns/provider/entry.go b/pkg/dns/provider/entry.go index c5441f043..dbea30bf6 100644 --- a/pkg/dns/provider/entry.go +++ b/pkg/dns/provider/entry.go @@ -19,6 +19,7 @@ package provider import ( "fmt" "net" + "reflect" "sort" "strconv" "strings" @@ -78,12 +79,13 @@ func (this *EntryPremise) NotifyChange(p *EntryPremise) string { } type EntryVersion struct { - object dnsutils.DNSSpecification - providername resources.ObjectName - dnsname string - targets Targets - mappings map[string][]string - warnings []string + object dnsutils.DNSSpecification + providername resources.ObjectName + rsname dns.RecordSetName + targets Targets + routingPolicy *dns.RoutingPolicy + mappings map[string][]string + warnings []string status api.DNSBaseStatus @@ -97,7 +99,7 @@ type EntryVersion struct { func NewEntryVersion(object dnsutils.DNSSpecification, old *Entry) *EntryVersion { v := &EntryVersion{ object: object, - dnsname: object.GetDNSName(), + rsname: dns.RecordSetName{DNSName: object.GetDNSName(), SetIdentifier: object.GetSetIdentifier()}, targets: Targets{}, mappings: map[string][]string{}, } @@ -114,8 +116,8 @@ func (this *EntryVersion) Kind() string { } func 
(this *EntryVersion) RequiresUpdateFor(e *EntryVersion) (reasons []string, refresh bool) { - if this.dnsname != e.dnsname { - reasons = append(reasons, "dnsname changed") + if this.rsname != e.rsname { + reasons = append(reasons, "recordset name changed") } if !utils.Int64Equal(this.status.TTL, e.status.TTL) { reasons = append(reasons, "ttl changed") @@ -132,6 +134,9 @@ func (this *EntryVersion) RequiresUpdateFor(e *EntryVersion) (reasons []string, if this.targets.DifferFrom(e.targets) { reasons = append(reasons, "targets changed") } + if !reflect.DeepEqual(this.routingPolicy, e.routingPolicy) { + reasons = append(reasons, "routing policy changed") + } if this.State() != e.State() { if e.State() != api.STATE_READY { reasons = append(reasons, "state changed") @@ -189,17 +194,29 @@ func (this *EntryVersion) ObjectName() resources.ObjectName { } func (this *EntryVersion) DNSName() string { - return this.dnsname + return this.rsname.DNSName +} + +func (this *EntryVersion) SetIdentifier() string { + return this.rsname.SetIdentifier +} + +func (this *EntryVersion) RecordSetName() dns.RecordSetName { + return this.rsname } -func (this *EntryVersion) ZonedDNSName() ZonedDNSName { - return ZonedDNSName{ZoneID: this.ZoneId(), DNSName: this.dnsname} +func (this *EntryVersion) ZonedDNSName() ZonedRecordSetName { + return ZonedRecordSetName{ZoneID: this.ZoneId(), RecordSetName: this.rsname} } func (this *EntryVersion) Targets() Targets { return this.targets } +func (this *EntryVersion) RoutingPolicy() *dns.RoutingPolicy { + return this.routingPolicy +} + func (this *EntryVersion) Description() string { return this.object.Description() } @@ -238,6 +255,7 @@ type dnsSpecModification struct { ttl *int64 ownerid *string lookup *int64 + policy *dns.RoutingPolicy } func (this *dnsSpecModification) GetTargets() []string { @@ -276,7 +294,7 @@ func (this *dnsSpecModification) GetTTL() *int64 { } func (this *dnsSpecModification) IsModified() bool { - return this.targets != nil || this.text != nil || this.ownerid != nil || this.lookup != nil || this.ttl != nil + return this.targets != nil || this.text != nil || this.ownerid != nil || this.lookup != nil || this.ttl != nil || this.policy != nil } func complete(logger logger.LogContext, state *state, spec dnsutils.DNSSpecification, object resources.Object, prefix string) (dnsutils.DNSSpecification, error) { @@ -357,7 +375,7 @@ func validate(logger logger.LogContext, state *state, entry *EntryVersion, p *En return } - if p.zonedomain == entry.dnsname { + if p.zonedomain == entry.rsname.DNSName { err = fmt.Errorf("usage of dns name (%s) identical to domain of hosted zone (%s) is not supported", p.zonedomain, p.zoneid) return @@ -567,6 +585,7 @@ func (this *EntryVersion) Setup(logger logger.LogContext, state *state, p *Entry } this.targets = targets + this.routingPolicy = spec.GetRoutingPolicy() if err != nil { if this.status.State != api.STATE_STALE { if this.status.State == api.STATE_READY && (p.provider != nil && !p.provider.IsValid()) { @@ -586,7 +605,7 @@ func (this *EntryVersion) Setup(logger logger.LogContext, state *state, p *Entry if p.zoneid == "" { this.status.State = api.STATE_ERROR this.status.Provider = nil - this.status.Message = StatusMessagef("no provider found for %q", this.dnsname) + this.status.Message = StatusMessagef("no provider found for %q", this.rsname) } else { if p.provider.IsValid() { this.valid = true @@ -652,6 +671,7 @@ func (this *EntryVersion) updateStatus(logger logger.LogContext, state, msg stri } if 
utils.StringValue(this.status.Provider) == "" { mod.Modify(o.AcknowledgeTargets(nil)) + mod.Modify(o.AcknowledgeRoutingPolicy(nil)) } if mod.IsModified() { logmsg.Infof(logger) @@ -683,11 +703,15 @@ func (this *EntryVersion) UpdateStatus(logger logger.LogContext, state string, m logger.Info(msg) mod.Modify(true) } + if o.AcknowledgeRoutingPolicy(this.routingPolicy) { + mod.Modify(true) + } if this.status.Provider != nil { mod.AssureStringPtrPtr(&b.Provider, this.status.Provider) } } else if state != api.STATE_STALE { mod.Modify(o.AcknowledgeTargets(nil)) + mod.Modify(o.AcknowledgeRoutingPolicy(nil)) } mod.AssureInt64Value(&b.ObservedGeneration, o.GetGeneration()) if !(this.status.State == api.STATE_STALE && this.status.State == state) { diff --git a/pkg/dns/provider/errors/errors.go b/pkg/dns/provider/errors/errors.go index a6f36cd64..ec13cca2b 100644 --- a/pkg/dns/provider/errors/errors.go +++ b/pkg/dns/provider/errors/errors.go @@ -21,6 +21,7 @@ import ( "time" "github.com/gardener/controller-manager-library/pkg/resources" + "github.com/gardener/external-dns-management/pkg/dns" ) type AlreadyBusyForEntry struct { @@ -33,13 +34,13 @@ func (e *AlreadyBusyForEntry) Error() string { } type AlreadyBusyForOwner struct { - DNSName string + Name dns.RecordSetName EntryCreatedAt time.Time Owner string } func (e *AlreadyBusyForOwner) Error() string { - return fmt.Sprintf("DNS name %q already busy for owner %q", e.DNSName, e.Owner) + return fmt.Sprintf("DNS name %q already busy for owner %q", e.Name, e.Owner) } type NoSuchHostedZone struct { diff --git a/pkg/dns/provider/inmemory.go b/pkg/dns/provider/inmemory.go index a216b4189..3817a1b08 100644 --- a/pkg/dns/provider/inmemory.go +++ b/pkg/dns/provider/inmemory.go @@ -121,7 +121,7 @@ func (m *InMemory) Apply(zoneID dns.ZoneID, request *ChangeRequest, metrics Metr return nil } -func buildRecordSet(req *ChangeRequest) (string, *dns.RecordSet) { +func buildRecordSet(req *ChangeRequest) (dns.RecordSetName, *dns.RecordSet) { var dnsset *dns.DNSSet switch req.Action { case R_CREATE, R_UPDATE: diff --git a/pkg/dns/provider/raw/execution.go b/pkg/dns/provider/raw/execution.go index c4afca3c5..3d834ba76 100644 --- a/pkg/dns/provider/raw/execution.go +++ b/pkg/dns/provider/raw/execution.go @@ -50,7 +50,7 @@ type Execution struct { updates RecordSet deletions RecordSet - results map[string]*result + results map[dns.RecordSetName]*result } func NewExecution(logger logger.LogContext, e Executor, state *ZoneState, zone provider.DNSHostedZone) *Execution { @@ -60,7 +60,7 @@ func NewExecution(logger logger.LogContext, e Executor, state *ZoneState, zone p zone: zone, state: state, domain: zone.Domain(), - results: map[string]*result{}, + results: map[dns.RecordSetName]*result{}, additions: RecordSet{}, updates: RecordSet{}, deletions: RecordSet{}, @@ -68,16 +68,24 @@ func NewExecution(logger logger.LogContext, e Executor, state *ZoneState, zone p } func (this *Execution) AddChange(req *provider.ChangeRequest) { - var name string + var name dns.RecordSetName var newset, oldset *dns.RecordSet if req.Addition != nil { - name, newset = dns.MapToProvider(req.Type, req.Addition, this.domain) + name, newset = dns.MapToProviderEx(req.Type, req.Addition, this.domain, nil) } if req.Deletion != nil { - name, oldset = dns.MapToProvider(req.Type, req.Deletion, this.domain) + name, oldset = dns.MapToProviderEx(req.Type, req.Deletion, this.domain, nil) } - if name == "" || (newset.Length() == 0 && oldset.Length() == 0) { + if name.DNSName == "" || (newset.Length() == 0 && 
oldset.Length() == 0) { + return + } + if name.SetIdentifier != "" || req.RoutingPolicy != nil { + err := fmt.Errorf("routing policy not supported") + this.Warnf("record set %s[%s]: %s", name, this.zone.Id(), err) + if req.Done != nil { + req.Done.SetInvalid(err) + } return } switch req.Action { @@ -108,10 +116,10 @@ func (this *Execution) AddChange(req *provider.ChangeRequest) { } } -func (this *Execution) add(dnsname string, rset *dns.RecordSet, modonly bool, found *RecordSet, notfound *RecordSet) { +func (this *Execution) add(name dns.RecordSetName, rset *dns.RecordSet, modonly bool, found *RecordSet, notfound *RecordSet) { rtype := rset.Type for _, r := range rset.Records { - old := this.state.GetRecord(dnsname, rtype, r.Value) + old := this.state.GetRecord(name, rtype, r.Value) if old != nil { if (!modonly) || (old.GetTTL() != int(rset.TTL)) { or := old.Copy() @@ -120,7 +128,7 @@ func (this *Execution) add(dnsname string, rset *dns.RecordSet, modonly bool, fo } } else { if notfound != nil { - record := this.executor.NewRecord(dnsname, rset.Type, r.Value, this.zone, rset.TTL) + record := this.executor.NewRecord(name.DNSName, rset.Type, r.Value, this.zone, rset.TTL) *notfound = append(*notfound, record) } } @@ -176,7 +184,7 @@ func (this *Execution) SubmitChanges() error { func (this *Execution) submit(f func(record Record, zone provider.DNSHostedZone) error, r Record) { err := f(r, this.zone) if err != nil { - res := this.results[r.GetDNSName()] + res := this.results[dns.RecordSetName{DNSName: r.GetDNSName(), SetIdentifier: r.GetSetIdentifier()}] if res != nil { res.err = err this.Infof("operation failed for %s %s: %s", r.GetType(), r.GetDNSName(), err) diff --git a/pkg/dns/provider/raw/records.go b/pkg/dns/provider/raw/records.go index d19671c00..6491f561a 100644 --- a/pkg/dns/provider/raw/records.go +++ b/pkg/dns/provider/raw/records.go @@ -29,6 +29,7 @@ type Record interface { GetType() string GetValue() string GetDNSName() string + GetSetIdentifier() string GetTTL() int SetTTL(int) Copy() Record @@ -56,13 +57,13 @@ func (this DNSSet) Clone() DNSSet { type ZoneState struct { dnssets dns.DNSSets - records map[string]DNSSet + records map[dns.RecordSetName]DNSSet } var _ provider.DNSZoneState = &ZoneState{} func NewState() *ZoneState { - return &ZoneState{records: map[string]DNSSet{}} + return &ZoneState{records: map[dns.RecordSetName]DNSSet{}} } func (this *ZoneState) GetDNSSets() dns.DNSSets { @@ -72,7 +73,7 @@ func (this *ZoneState) GetDNSSets() dns.DNSSets { func (this *ZoneState) Clone() provider.DNSZoneState { clone := NewState() clone.dnssets = this.dnssets.Clone() - clone.records = map[string]DNSSet{} + clone.records = map[dns.RecordSetName]DNSSet{} for k, v := range this.records { clone.records[k] = v.Clone() } @@ -81,7 +82,7 @@ func (this *ZoneState) Clone() provider.DNSZoneState { func (this *ZoneState) AddRecord(r Record) { if dns.SupportedRecordType(r.GetType()) { - name := r.GetDNSName() + name := dns.RecordSetName{DNSName: r.GetDNSName(), SetIdentifier: r.GetSetIdentifier()} t := r.GetType() e := this.records[name] if e == nil { @@ -92,7 +93,7 @@ func (this *ZoneState) AddRecord(r Record) { } } -func (this *ZoneState) GetRecord(dnsname, rtype, value string) Record { +func (this *ZoneState) GetRecord(dnsname dns.RecordSetName, rtype, value string) Record { e := this.records[dnsname] if e != nil { for _, r := range e[rtype] { @@ -113,7 +114,7 @@ func (this *ZoneState) CalculateDNSSets() { rs.TTL = int64(r.GetTTL()) rs.Add(&dns.Record{Value: r.GetValue()}) } - 
this.dnssets.AddRecordSetFromProvider(dnsname, rs) + this.dnssets.AddRecordSetFromProviderEx(dnsname, rs) } } } diff --git a/pkg/dns/provider/state.go b/pkg/dns/provider/state.go index af41fc35b..9b15bfb5d 100644 --- a/pkg/dns/provider/state.go +++ b/pkg/dns/provider/state.go @@ -40,26 +40,35 @@ import ( "github.com/gardener/external-dns-management/pkg/server/remote/embed" ) -type ZonedDNSName struct { - ZoneID dns.ZoneID - DNSName string +type ZonedRecordSetName struct { + dns.RecordSetName + ZoneID dns.ZoneID } -func (z ZonedDNSName) String() string { - return fmt.Sprintf("%s[%s]", z.DNSName, z.ZoneID) +func (z ZonedRecordSetName) String() string { + return fmt.Sprintf("%s[%s]", z.RecordSetName, z.ZoneID) } -type DNSNames map[ZonedDNSName]*Entry +type RecordSetNames map[ZonedRecordSetName]*Entry -type DNSNameSet = utils.StringSet +type RecordSetNameSet map[dns.RecordSetName]struct{} + +func (s RecordSetNameSet) Add(name dns.RecordSetName) { + s[name] = struct{}{} +} + +func (s RecordSetNameSet) Contains(name dns.RecordSetName) bool { + _, ok := s[name] + return ok +} type zoneReconciliation struct { zone *dnsHostedZone providers DNSProviders entries Entries - equivEntries DNSNameSet + equivEntries RecordSetNameSet ownership dns.Ownership - stale DNSNames + stale RecordSetNames dedicated bool deleting bool fhandler FinalizerHandler @@ -148,7 +157,7 @@ type state struct { providerRateLimiter map[resources.ObjectName]*rateLimiterData prlock sync.RWMutex - dnsnames DNSNames + dnsnames RecordSetNames references *References initialized bool @@ -202,7 +211,7 @@ func NewDNSState(ctx Context, ownerresc, secretresc resources.Interface, classes entries: Entries{}, outdated: newSynchronizedEntries(), blockingEntries: map[resources.ObjectName]time.Time{}, - dnsnames: map[ZonedDNSName]*Entry{}, + dnsnames: map[ZonedRecordSetName]*Entry{}, references: NewReferenceCache(), providerRateLimiter: map[resources.ObjectName]*rateLimiterData{}, } @@ -458,26 +467,26 @@ func (this *state) GetZonesForProvider(name resources.ObjectName) dnsHostedZones return copyZones(this.providerzones[name]) } -func (this *state) GetEntriesForZone(logger logger.LogContext, zoneid dns.ZoneID) (Entries, DNSNames, bool) { +func (this *state) GetEntriesForZone(logger logger.LogContext, zoneid dns.ZoneID) (Entries, RecordSetNames, bool) { this.lock.RLock() defer this.lock.RUnlock() entries := Entries{} zone := this.zones[zoneid] if zone != nil { - entries, _, stale, deleting := this.addEntriesForZone(logger, entries, DNSNames{}, zone) + entries, _, stale, deleting := this.addEntriesForZone(logger, entries, RecordSetNames{}, zone) return entries, stale, deleting } return entries, nil, false } -func (this *state) addEntriesForZone(logger logger.LogContext, entries Entries, stale DNSNames, zone DNSHostedZone) (Entries, DNSNameSet, DNSNames, bool) { +func (this *state) addEntriesForZone(logger logger.LogContext, entries Entries, stale RecordSetNames, zone DNSHostedZone) (Entries, RecordSetNameSet, RecordSetNames, bool) { if entries == nil { entries = Entries{} } if stale == nil { - stale = DNSNames{} + stale = RecordSetNames{} } - equivEntries := DNSNameSet{} + equivEntries := RecordSetNameSet{} deleting := true // TODO check domain := zone.Domain() // fallback if no forwarded domains are reported @@ -511,7 +520,7 @@ func (this *state) addEntriesForZone(logger logger.LogContext, entries Entries, continue } else if !provider.IncludesZone(zone.Id()) { if provider.HasEquivalentZone(zone.Id()) && e.IsActive() && !forwarded(nested, 
dns.DNSName) { - equivEntries.Add(dns.DNSName) + equivEntries.Add(dns.RecordSetName) } continue } diff --git a/pkg/dns/provider/state_entry.go b/pkg/dns/provider/state_entry.go index 6232ab58a..6c314578f 100644 --- a/pkg/dns/provider/state_entry.go +++ b/pkg/dns/provider/state_entry.go @@ -411,9 +411,9 @@ func (this *state) checkAndUpdateLock(logger logger.LogContext, entry *Entry, pr target := dnsutils.NewText(s, newTTL) records = append(records, target.AsRecord()) } - newRS := FromDedicatedRecordSet(entry.DNSName(), dns.NewRecordSet(dns.RS_TXT, newTTL, records)) + newRS := FromDedicatedRecordSet(entry.RecordSetName(), dns.NewRecordSet(dns.RS_TXT, newTTL, records)) - rs, err := handler.GetRecordSet(zone, entry.DNSName(), dns.RS_TXT) + rs, err := handler.GetRecordSet(zone, entry.RecordSetName(), dns.RS_TXT) if err != nil { return reconcile.Delay(logger, err) } @@ -515,7 +515,7 @@ func (this *state) checkAndDeleteLock(logger logger.LogContext, entry *Entry, pr handler := premise.provider.GetDedicatedDNSAccess() zone := this.zones[entry.ZoneId()] - rs, err := handler.GetRecordSet(zone, entry.DNSName(), dns.RS_TXT) + rs, err := handler.GetRecordSet(zone, entry.RecordSetName(), dns.RS_TXT) if err != nil { return reconcile.Delay(logger, err) } diff --git a/pkg/dns/provider/state_provider.go b/pkg/dns/provider/state_provider.go index d071d8403..7d4564872 100644 --- a/pkg/dns/provider/state_provider.go +++ b/pkg/dns/provider/state_provider.go @@ -86,7 +86,7 @@ func (this *state) _UpdateLocalProvider(logger logger.LogContext, obj *dnsutils. if last != nil { logger.Infof("trigger entries for old zones") entries := Entries{} - stale := DNSNames{} + stale := RecordSetNames{} for _, z := range last.zones { this.addEntriesForZone(logger, entries, stale, z) } diff --git a/pkg/dns/provider/state_zone.go b/pkg/dns/provider/state_zone.go index e6cf1fbac..ea7e134a4 100644 --- a/pkg/dns/provider/state_zone.go +++ b/pkg/dns/provider/state_zone.go @@ -196,14 +196,14 @@ func (this *state) reconcileZone(logger logger.LogContext, req *zoneReconciliati spec := e.object.GetTargetSpec(e) statusUpdate := NewStatusUpdate(logger, e, this.GetContext()) if e.IsDeleting() { - changeResult = changes.Delete(e.DNSName(), e.ObjectName().Namespace(), e.CreatedAt(), statusUpdate, spec) + changeResult = changes.Delete(e.RecordSetName(), e.ObjectName().Namespace(), e.CreatedAt(), statusUpdate, spec) } else { if !e.NotRateLimited() { - changeResult = changes.Check(e.DNSName(), e.ObjectName().Namespace(), e.CreatedAt(), statusUpdate, spec) + changeResult = changes.Check(e.RecordSetName(), e.ObjectName().Namespace(), e.CreatedAt(), statusUpdate, spec) if changeResult.Modified { if accepted, delay := this.tryAcceptProviderRateLimiter(logger, e); !accepted { req.zone.nextTrigger = delay - changes.PseudoApply(e.DNSName()) + changes.PseudoApply(e.RecordSetName()) logger.Infof("rate limited %s, delay %.1f s", e.ObjectName(), delay.Seconds()) statusUpdate.Throttled() if delay.Seconds() > 2 { @@ -213,7 +213,7 @@ func (this *state) reconcileZone(logger logger.LogContext, req *zoneReconciliati } } } - changeResult = changes.Apply(e.DNSName(), e.ObjectName().Namespace(), e.CreatedAt(), statusUpdate, spec) + changeResult = changes.Apply(e.RecordSetName(), e.ObjectName().Namespace(), e.CreatedAt(), statusUpdate, spec) if changeResult.Error != nil && changeResult.Retry { conflictErr = changeResult.Error } @@ -228,7 +228,7 @@ func (this *state) reconcileZone(logger logger.LogContext, req *zoneReconciliati outdatedEntries := EntryList{} 
this.outdated.AddActiveZoneTo(zoneid, &outdatedEntries) for _, e := range outdatedEntries { - if changes.IsFailed(e.DNSName()) { + if changes.IsFailed(e.RecordSetName()) { continue } logger.Infof("cleanup outdated entry %q", e.ObjectName()) diff --git a/pkg/dns/provider/zonecache.go b/pkg/dns/provider/zonecache.go index 7f07f4b86..a3aa5940b 100644 --- a/pkg/dns/provider/zonecache.go +++ b/pkg/dns/provider/zonecache.go @@ -355,9 +355,11 @@ func (s *zoneStates) ExecuteRequests(zoneID dns.ZoneID, reqs []*ChangeRequest) { var err error nullMetrics := &NullMetrics{} for _, req := range reqs { - err = s.inMemory.Apply(zoneID, req, nullMetrics) - if err != nil { - break + if req.Applied { + err = s.inMemory.Apply(zoneID, req, nullMetrics) + if err != nil { + break + } } } diff --git a/pkg/dns/records.go b/pkg/dns/records.go index d962f1abb..9dbb2f994 100644 --- a/pkg/dns/records.go +++ b/pkg/dns/records.go @@ -18,6 +18,7 @@ package dns import ( "fmt" + "reflect" "strings" ) @@ -55,21 +56,26 @@ func (this *Record) Clone() *Record { } type RecordSet struct { - Type string - TTL int64 - IgnoreTTL bool - Records Records + Type string + TTL int64 + IgnoreTTL bool + RoutingPolicy *RoutingPolicy + Records Records } func NewRecordSet(rtype string, ttl int64, records []*Record) *RecordSet { + return NewRecordSetEx(rtype, ttl, nil, records) +} + +func NewRecordSetEx(rtype string, ttl int64, policy *RoutingPolicy, records []*Record) *RecordSet { if records == nil { records = Records{} } - return &RecordSet{Type: rtype, TTL: ttl, Records: records} + return &RecordSet{Type: rtype, TTL: ttl, RoutingPolicy: policy, Records: records} } func (this *RecordSet) Clone() *RecordSet { - set := &RecordSet{this.Type, this.TTL, this.IgnoreTTL, nil} + set := &RecordSet{Type: this.Type, TTL: this.TTL, IgnoreTTL: this.IgnoreTTL, RoutingPolicy: this.RoutingPolicy.Clone()} for _, r := range this.Records { set.Records = append(set.Records, r.Clone()) } @@ -111,6 +117,9 @@ func (this *RecordSet) Match(set *RecordSet) bool { if !this.IgnoreTTL && !set.IgnoreTTL && this.TTL != set.TTL { return false } + if !reflect.DeepEqual(this.RoutingPolicy, set.RoutingPolicy) { + return false + } for _, r := range this.Records { found := false @@ -198,7 +207,7 @@ func newAttrRecord(name, value string) *Record { return &Record{Value: newAttrValue(name, value)} } -func newAttrRecordSet(ty string, name, value string) *RecordSet { +func newAttrRecordSet(ty string, name, value string, routingPolicy *RoutingPolicy) *RecordSet { records := []*Record{newAttrRecord(name, value)} - return &RecordSet{ty, 600, false, records} + return &RecordSet{Type: ty, TTL: 600, IgnoreTTL: false, RoutingPolicy: routingPolicy, Records: records} } diff --git a/pkg/dns/utils/target.go b/pkg/dns/utils/target.go index b749d6394..7c1f73d25 100644 --- a/pkg/dns/utils/target.go +++ b/pkg/dns/utils/target.go @@ -92,20 +92,23 @@ type TargetSpec interface { Kind() string OwnerId() string Targets() []Target + RoutingPolicy() *dns.RoutingPolicy Responsible(set *dns.DNSSet, ownership dns.Ownership) bool } type targetSpec struct { - kind string - ownerId string - targets []Target + kind string + ownerId string + targets []Target + routingPolicy *dns.RoutingPolicy } func BaseTargetSpec(entry DNSSpecification, p TargetProvider) TargetSpec { spec := &targetSpec{ - kind: entry.GroupKind().Kind, - ownerId: p.OwnerId(), - targets: p.Targets(), + kind: entry.GroupKind().Kind, + ownerId: p.OwnerId(), + targets: p.Targets(), + routingPolicy: p.RoutingPolicy(), } return spec } @@ -125,3 
+128,7 @@ func (this *targetSpec) Targets() []Target {
 func (this *targetSpec) Responsible(set *dns.DNSSet, ownership dns.Ownership) bool {
 	return !set.IsForeign(ownership)
 }
+
+func (this *targetSpec) RoutingPolicy() *dns.RoutingPolicy {
+	return this.routingPolicy
+}
diff --git a/pkg/dns/utils/utils_dns.go b/pkg/dns/utils/utils_dns.go
index 87a60d747..76b54dc8c 100644
--- a/pkg/dns/utils/utils_dns.go
+++ b/pkg/dns/utils/utils_dns.go
@@ -20,19 +20,21 @@ import (
 	"time"
 
 	"github.com/gardener/controller-manager-library/pkg/resources"
 	api "github.com/gardener/external-dns-management/pkg/apis/dns/v1alpha1"
+	"github.com/gardener/external-dns-management/pkg/dns"
 )
 
 type TargetProvider interface {
 	Targets() Targets
 	TTL() int64
 	OwnerId() string
+	RoutingPolicy() *dns.RoutingPolicy
 }
 
 type DNSSpecification interface {
 	resources.Object
 	GetDNSName() string
+	GetSetIdentifier() string
 	GetTTL() *int64
 	GetOwnerId() *string
 	GetTargets() []string
@@ -40,12 +42,14 @@
 	GetCNameLookupInterval() *int64
 	GetReference() *api.EntryReference
 	BaseStatus() *api.DNSBaseStatus
+	GetRoutingPolicy() *dns.RoutingPolicy
 	GetTargetSpec(TargetProvider) TargetSpec
 
 	RefreshTime() time.Time
 	ValidateSpecial() error
 	AcknowledgeTargets(targets []string) bool
+	AcknowledgeRoutingPolicy(policy *dns.RoutingPolicy) bool
 }
 
 func DNSObject(data resources.Object, ign ...interface{}) DNSSpecification {
diff --git a/pkg/dns/utils/utils_entry.go b/pkg/dns/utils/utils_entry.go
index 037945894..dbadc95f1 100644
--- a/pkg/dns/utils/utils_entry.go
+++ b/pkg/dns/utils/utils_entry.go
@@ -21,6 +21,7 @@ import (
 	"time"
 
 	"github.com/gardener/controller-manager-library/pkg/resources"
+	"github.com/gardener/external-dns-management/pkg/dns"
 	api "github.com/gardener/external-dns-management/pkg/apis/dns/v1alpha1"
 )
@@ -63,6 +64,12 @@ func (this *DNSEntryObject) BaseStatus() *api.DNSBaseStatus {
 func (this *DNSEntryObject) GetDNSName() string {
 	return this.DNSEntry().Spec.DNSName
 }
+func (this *DNSEntryObject) GetSetIdentifier() string {
+	if policy := this.DNSEntry().Spec.RoutingPolicy; policy != nil {
+		return policy.SetIdentifier
+	}
+	return ""
+}
 func (this *DNSEntryObject) GetTargets() []string {
 	return this.DNSEntry().Spec.Targets
 }
@@ -81,6 +88,15 @@ func (this *DNSEntryObject) GetCNameLookupInterval() *int64 {
 func (this *DNSEntryObject) GetReference() *api.EntryReference {
 	return this.DNSEntry().Spec.Reference
 }
+func (this *DNSEntryObject) GetRoutingPolicy() *dns.RoutingPolicy {
+	if policy := this.DNSEntry().Spec.RoutingPolicy; policy != nil {
+		return &dns.RoutingPolicy{
+			Type:       policy.Type,
+			Parameters: policy.Parameters,
+		}
+	}
+	return nil
+}
 
 func (this *DNSEntryObject) RefreshTime() time.Time {
 	return time.Time{}
@@ -99,6 +115,27 @@ func (this *DNSEntryObject) AcknowledgeTargets(targets []string) bool {
 	return false
 }
 
+func (this *DNSEntryObject) AcknowledgeRoutingPolicy(policy *dns.RoutingPolicy) bool {
+	s := this.Status()
+	if s.RoutingPolicy == nil && policy == nil {
+		return false
+	}
+	if policy == nil {
+		s.RoutingPolicy = nil
+		return true
+	}
+	statusPolicy := &api.RoutingPolicy{
+		Type:          policy.Type,
+		SetIdentifier: this.GetSetIdentifier(),
+		Parameters:    policy.Parameters,
+	}
+	if !reflect.DeepEqual(s.RoutingPolicy, statusPolicy) {
+		s.RoutingPolicy = statusPolicy
+		return true
+	}
+	return false
+}
+
 func (this *DNSEntryObject) GetTargetSpec(p TargetProvider) TargetSpec {
 	return BaseTargetSpec(this, p)
 }
diff --git a/pkg/dns/utils/utils_lock.go b/pkg/dns/utils/utils_lock.go
index 
4d884609a..a5d0f4423 100644 --- a/pkg/dns/utils/utils_lock.go +++ b/pkg/dns/utils/utils_lock.go @@ -67,6 +67,10 @@ func (this *DNSLockObject) GetDNSName() string { return this.DNSLock().Spec.DNSName } +func (this *DNSLockObject) GetSetIdentifier() string { + return "" +} + func (this *DNSLockObject) GetTargets() []string { return nil } @@ -109,6 +113,10 @@ func (this *DNSLockObject) GetReference() *api.EntryReference { return nil } +func (this *DNSLockObject) GetRoutingPolicy() *dns.RoutingPolicy { + return nil +} + func (this *DNSLockObject) RefreshTime() time.Time { return this.Spec().Timestamp.Time } @@ -124,6 +132,10 @@ func (this *DNSLockObject) AcknowledgeTargets(targets []string) bool { return false } +func (this *DNSLockObject) AcknowledgeRoutingPolicy(policy *dns.RoutingPolicy) bool { + return false +} + func (this *DNSLockObject) GetTargetSpec(p TargetProvider) TargetSpec { return &lockTargetSpec{ TargetSpec: BaseTargetSpec(this, p), diff --git a/pkg/server/remote/common/remote.pb.go b/pkg/server/remote/common/remote.pb.go index 21cc3dd9d..ffe639b4d 100644 --- a/pkg/server/remote/common/remote.pb.go +++ b/pkg/server/remote/common/remote.pb.go @@ -1,7 +1,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: // protoc-gen-go v1.26.0 -// protoc v3.13.0 +// protoc v3.19.4 // source: pkg/server/remote/common/remote.proto package common @@ -519,9 +519,10 @@ type RecordSet struct { sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields - Type string `protobuf:"bytes,1,opt,name=type,proto3" json:"type,omitempty"` - Ttl int32 `protobuf:"varint,2,opt,name=ttl,proto3" json:"ttl,omitempty"` - Record []*RecordSet_Record `protobuf:"bytes,3,rep,name=record,proto3" json:"record,omitempty"` + Type string `protobuf:"bytes,1,opt,name=type,proto3" json:"type,omitempty"` + Ttl int32 `protobuf:"varint,2,opt,name=ttl,proto3" json:"ttl,omitempty"` + Record []*RecordSet_Record `protobuf:"bytes,3,rep,name=record,proto3" json:"record,omitempty"` + RoutingPolicy *RecordSet_RoutingPolicy `protobuf:"bytes,4,opt,name=routing_policy,json=routingPolicy,proto3" json:"routing_policy,omitempty"` } func (x *RecordSet) Reset() { @@ -577,14 +578,22 @@ func (x *RecordSet) GetRecord() []*RecordSet_Record { return nil } +func (x *RecordSet) GetRoutingPolicy() *RecordSet_RoutingPolicy { + if x != nil { + return x.RoutingPolicy + } + return nil +} + type DNSSet struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields - DnsName string `protobuf:"bytes,1,opt,name=dns_name,json=dnsName,proto3" json:"dns_name,omitempty"` - UpdateGroup string `protobuf:"bytes,2,opt,name=update_group,json=updateGroup,proto3" json:"update_group,omitempty"` - Records map[string]*RecordSet `protobuf:"bytes,3,rep,name=records,proto3" json:"records,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + DnsName string `protobuf:"bytes,1,opt,name=dns_name,json=dnsName,proto3" json:"dns_name,omitempty"` + UpdateGroup string `protobuf:"bytes,2,opt,name=update_group,json=updateGroup,proto3" json:"update_group,omitempty"` + Records map[string]*RecordSet `protobuf:"bytes,3,rep,name=records,proto3" json:"records,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + SetIdentifier string `protobuf:"bytes,4,opt,name=set_identifier,json=setIdentifier,proto3" json:"set_identifier,omitempty"` } func (x *DNSSet) Reset() { @@ -640,15 +649,23 @@ func (x *DNSSet) GetRecords() map[string]*RecordSet { 
return nil } +func (x *DNSSet) GetSetIdentifier() string { + if x != nil { + return x.SetIdentifier + } + return "" +} + type PartialDNSSet struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields - DnsName string `protobuf:"bytes,1,opt,name=dns_name,json=dnsName,proto3" json:"dns_name,omitempty"` - UpdateGroup string `protobuf:"bytes,2,opt,name=update_group,json=updateGroup,proto3" json:"update_group,omitempty"` - RecordType string `protobuf:"bytes,3,opt,name=record_type,json=recordType,proto3" json:"record_type,omitempty"` - RecordSet *RecordSet `protobuf:"bytes,4,opt,name=record_set,json=recordSet,proto3" json:"record_set,omitempty"` + DnsName string `protobuf:"bytes,1,opt,name=dns_name,json=dnsName,proto3" json:"dns_name,omitempty"` + UpdateGroup string `protobuf:"bytes,2,opt,name=update_group,json=updateGroup,proto3" json:"update_group,omitempty"` + RecordType string `protobuf:"bytes,3,opt,name=record_type,json=recordType,proto3" json:"record_type,omitempty"` + RecordSet *RecordSet `protobuf:"bytes,4,opt,name=record_set,json=recordSet,proto3" json:"record_set,omitempty"` + SetIdentifier string `protobuf:"bytes,5,opt,name=set_identifier,json=setIdentifier,proto3" json:"set_identifier,omitempty"` } func (x *PartialDNSSet) Reset() { @@ -711,6 +728,13 @@ func (x *PartialDNSSet) GetRecordSet() *RecordSet { return nil } +func (x *PartialDNSSet) GetSetIdentifier() string { + if x != nil { + return x.SetIdentifier + } + return "" +} + type ZoneState struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache @@ -1104,6 +1128,61 @@ func (x *RecordSet_Record) GetValue() string { return "" } +type RecordSet_RoutingPolicy struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Type string `protobuf:"bytes,1,opt,name=type,proto3" json:"type,omitempty"` + Parameters map[string]string `protobuf:"bytes,2,rep,name=parameters,proto3" json:"parameters,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` +} + +func (x *RecordSet_RoutingPolicy) Reset() { + *x = RecordSet_RoutingPolicy{} + if protoimpl.UnsafeEnabled { + mi := &file_pkg_server_remote_common_remote_proto_msgTypes[16] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *RecordSet_RoutingPolicy) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*RecordSet_RoutingPolicy) ProtoMessage() {} + +func (x *RecordSet_RoutingPolicy) ProtoReflect() protoreflect.Message { + mi := &file_pkg_server_remote_common_remote_proto_msgTypes[16] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use RecordSet_RoutingPolicy.ProtoReflect.Descriptor instead. 
+func (*RecordSet_RoutingPolicy) Descriptor() ([]byte, []int) { + return file_pkg_server_remote_common_remote_proto_rawDescGZIP(), []int{6, 1} +} + +func (x *RecordSet_RoutingPolicy) GetType() string { + if x != nil { + return x.Type + } + return "" +} + +func (x *RecordSet_RoutingPolicy) GetParameters() map[string]string { + if x != nil { + return x.Parameters + } + return nil +} + var File_pkg_server_remote_common_remote_proto protoreflect.FileDescriptor var file_pkg_server_remote_common_remote_proto_rawDesc = []byte{ @@ -1138,119 +1217,140 @@ var file_pkg_server_remote_common_remote_proto_rawDesc = []byte{ 0x74, 0x12, 0x14, 0x0a, 0x05, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x12, 0x16, 0x0a, 0x06, 0x7a, 0x6f, 0x6e, 0x65, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x7a, 0x6f, 0x6e, 0x65, 0x69, 0x64, 0x22, - 0x83, 0x01, 0x0a, 0x09, 0x52, 0x65, 0x63, 0x6f, 0x72, 0x64, 0x53, 0x65, 0x74, 0x12, 0x12, 0x0a, + 0x81, 0x03, 0x0a, 0x09, 0x52, 0x65, 0x63, 0x6f, 0x72, 0x64, 0x53, 0x65, 0x74, 0x12, 0x12, 0x0a, 0x04, 0x74, 0x79, 0x70, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x74, 0x79, 0x70, 0x65, 0x12, 0x10, 0x0a, 0x03, 0x74, 0x74, 0x6c, 0x18, 0x02, 0x20, 0x01, 0x28, 0x05, 0x52, 0x03, 0x74, 0x74, 0x6c, 0x12, 0x30, 0x0a, 0x06, 0x72, 0x65, 0x63, 0x6f, 0x72, 0x64, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x18, 0x2e, 0x72, 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x2e, 0x52, 0x65, 0x63, 0x6f, 0x72, 0x64, 0x53, 0x65, 0x74, 0x2e, 0x52, 0x65, 0x63, 0x6f, 0x72, 0x64, 0x52, 0x06, 0x72, - 0x65, 0x63, 0x6f, 0x72, 0x64, 0x1a, 0x1e, 0x0a, 0x06, 0x52, 0x65, 0x63, 0x6f, 0x72, 0x64, 0x12, - 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, - 0x76, 0x61, 0x6c, 0x75, 0x65, 0x22, 0xcc, 0x01, 0x0a, 0x06, 0x44, 0x4e, 0x53, 0x53, 0x65, 0x74, - 0x12, 0x19, 0x0a, 0x08, 0x64, 0x6e, 0x73, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, - 0x28, 0x09, 0x52, 0x07, 0x64, 0x6e, 0x73, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x21, 0x0a, 0x0c, 0x75, - 0x70, 0x64, 0x61, 0x74, 0x65, 0x5f, 0x67, 0x72, 0x6f, 0x75, 0x70, 0x18, 0x02, 0x20, 0x01, 0x28, - 0x09, 0x52, 0x0b, 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, 0x47, 0x72, 0x6f, 0x75, 0x70, 0x12, 0x35, - 0x0a, 0x07, 0x72, 0x65, 0x63, 0x6f, 0x72, 0x64, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, - 0x1b, 0x2e, 0x72, 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x2e, 0x44, 0x4e, 0x53, 0x53, 0x65, 0x74, 0x2e, - 0x52, 0x65, 0x63, 0x6f, 0x72, 0x64, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x07, 0x72, 0x65, - 0x63, 0x6f, 0x72, 0x64, 0x73, 0x1a, 0x4d, 0x0a, 0x0c, 0x52, 0x65, 0x63, 0x6f, 0x72, 0x64, 0x73, - 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, - 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x27, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, - 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x11, 0x2e, 0x72, 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x2e, - 0x52, 0x65, 0x63, 0x6f, 0x72, 0x64, 0x53, 0x65, 0x74, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, - 0x3a, 0x02, 0x38, 0x01, 0x22, 0xa0, 0x01, 0x0a, 0x0d, 0x50, 0x61, 0x72, 0x74, 0x69, 0x61, 0x6c, - 0x44, 0x4e, 0x53, 0x53, 0x65, 0x74, 0x12, 0x19, 0x0a, 0x08, 0x64, 0x6e, 0x73, 0x5f, 0x6e, 0x61, - 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x64, 0x6e, 0x73, 0x4e, 0x61, 0x6d, - 0x65, 0x12, 0x21, 0x0a, 0x0c, 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, 0x5f, 0x67, 0x72, 0x6f, 0x75, - 0x70, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, 0x47, - 0x72, 0x6f, 
0x75, 0x70, 0x12, 0x1f, 0x0a, 0x0b, 0x72, 0x65, 0x63, 0x6f, 0x72, 0x64, 0x5f, 0x74, - 0x79, 0x70, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x72, 0x65, 0x63, 0x6f, 0x72, - 0x64, 0x54, 0x79, 0x70, 0x65, 0x12, 0x30, 0x0a, 0x0a, 0x72, 0x65, 0x63, 0x6f, 0x72, 0x64, 0x5f, - 0x73, 0x65, 0x74, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x11, 0x2e, 0x72, 0x65, 0x6d, 0x6f, - 0x74, 0x65, 0x2e, 0x52, 0x65, 0x63, 0x6f, 0x72, 0x64, 0x53, 0x65, 0x74, 0x52, 0x09, 0x72, 0x65, - 0x63, 0x6f, 0x72, 0x64, 0x53, 0x65, 0x74, 0x22, 0xa4, 0x01, 0x0a, 0x09, 0x5a, 0x6f, 0x6e, 0x65, - 0x53, 0x74, 0x61, 0x74, 0x65, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, - 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x39, 0x0a, 0x08, 0x64, 0x6e, 0x73, 0x5f, 0x73, - 0x65, 0x74, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1e, 0x2e, 0x72, 0x65, 0x6d, 0x6f, - 0x74, 0x65, 0x2e, 0x5a, 0x6f, 0x6e, 0x65, 0x53, 0x74, 0x61, 0x74, 0x65, 0x2e, 0x44, 0x6e, 0x73, - 0x53, 0x65, 0x74, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x07, 0x64, 0x6e, 0x73, 0x53, 0x65, - 0x74, 0x73, 0x1a, 0x4a, 0x0a, 0x0c, 0x44, 0x6e, 0x73, 0x53, 0x65, 0x74, 0x73, 0x45, 0x6e, 0x74, - 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, - 0x03, 0x6b, 0x65, 0x79, 0x12, 0x24, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, - 0x01, 0x28, 0x0b, 0x32, 0x0e, 0x2e, 0x72, 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x2e, 0x44, 0x4e, 0x53, - 0x53, 0x65, 0x74, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, 0x7c, - 0x0a, 0x0e, 0x45, 0x78, 0x65, 0x63, 0x75, 0x74, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, - 0x12, 0x14, 0x0a, 0x05, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, - 0x05, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x12, 0x16, 0x0a, 0x06, 0x7a, 0x6f, 0x6e, 0x65, 0x69, 0x64, - 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x7a, 0x6f, 0x6e, 0x65, 0x69, 0x64, 0x12, 0x3c, - 0x0a, 0x0e, 0x63, 0x68, 0x61, 0x6e, 0x67, 0x65, 0x5f, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, - 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x72, 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x2e, - 0x43, 0x68, 0x61, 0x6e, 0x67, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x52, 0x0d, 0x63, - 0x68, 0x61, 0x6e, 0x67, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x22, 0xaa, 0x01, 0x0a, - 0x0d, 0x43, 0x68, 0x61, 0x6e, 0x67, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x38, - 0x0a, 0x06, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x20, - 0x2e, 0x72, 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x2e, 0x43, 0x68, 0x61, 0x6e, 0x67, 0x65, 0x52, 0x65, - 0x71, 0x75, 0x65, 0x73, 0x74, 0x2e, 0x41, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x54, 0x79, 0x70, 0x65, - 0x52, 0x06, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x2d, 0x0a, 0x06, 0x63, 0x68, 0x61, 0x6e, - 0x67, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x72, 0x65, 0x6d, 0x6f, 0x74, - 0x65, 0x2e, 0x50, 0x61, 0x72, 0x74, 0x69, 0x61, 0x6c, 0x44, 0x4e, 0x53, 0x53, 0x65, 0x74, 0x52, - 0x06, 0x63, 0x68, 0x61, 0x6e, 0x67, 0x65, 0x22, 0x30, 0x0a, 0x0a, 0x41, 0x63, 0x74, 0x69, 0x6f, - 0x6e, 0x54, 0x79, 0x70, 0x65, 0x12, 0x0a, 0x0a, 0x06, 0x43, 0x52, 0x45, 0x41, 0x54, 0x45, 0x10, - 0x00, 0x12, 0x0a, 0x0a, 0x06, 0x55, 0x50, 0x44, 0x41, 0x54, 0x45, 0x10, 0x01, 0x12, 0x0a, 0x0a, - 0x06, 0x44, 0x45, 0x4c, 0x45, 0x54, 0x45, 0x10, 0x02, 0x22, 0xa3, 0x01, 0x0a, 0x08, 0x4c, 0x6f, - 0x67, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x1c, 0x0a, 0x09, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, - 0x61, 0x6d, 0x70, 0x18, 0x01, 0x20, 
0x01, 0x28, 0x03, 0x52, 0x09, 0x74, 0x69, 0x6d, 0x65, 0x73, - 0x74, 0x61, 0x6d, 0x70, 0x12, 0x2c, 0x0a, 0x05, 0x6c, 0x65, 0x76, 0x65, 0x6c, 0x18, 0x02, 0x20, - 0x01, 0x28, 0x0e, 0x32, 0x16, 0x2e, 0x72, 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x2e, 0x4c, 0x6f, 0x67, - 0x45, 0x6e, 0x74, 0x72, 0x79, 0x2e, 0x4c, 0x65, 0x76, 0x65, 0x6c, 0x52, 0x05, 0x6c, 0x65, 0x76, - 0x65, 0x6c, 0x12, 0x18, 0x0a, 0x07, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x18, 0x03, 0x20, - 0x01, 0x28, 0x09, 0x52, 0x07, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x22, 0x31, 0x0a, 0x05, - 0x4c, 0x65, 0x76, 0x65, 0x6c, 0x12, 0x09, 0x0a, 0x05, 0x44, 0x45, 0x42, 0x55, 0x47, 0x10, 0x00, - 0x12, 0x08, 0x0a, 0x04, 0x49, 0x4e, 0x46, 0x4f, 0x10, 0x01, 0x12, 0x08, 0x0a, 0x04, 0x57, 0x41, - 0x52, 0x4e, 0x10, 0x02, 0x12, 0x09, 0x0a, 0x05, 0x45, 0x52, 0x52, 0x4f, 0x52, 0x10, 0x03, 0x22, - 0x85, 0x01, 0x0a, 0x0f, 0x45, 0x78, 0x65, 0x63, 0x75, 0x74, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, - 0x6e, 0x73, 0x65, 0x12, 0x3f, 0x0a, 0x0f, 0x63, 0x68, 0x61, 0x6e, 0x67, 0x65, 0x5f, 0x72, 0x65, - 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x16, 0x2e, 0x72, - 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x2e, 0x43, 0x68, 0x61, 0x6e, 0x67, 0x65, 0x52, 0x65, 0x73, 0x70, - 0x6f, 0x6e, 0x73, 0x65, 0x52, 0x0e, 0x63, 0x68, 0x61, 0x6e, 0x67, 0x65, 0x52, 0x65, 0x73, 0x70, - 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x31, 0x0a, 0x0b, 0x6c, 0x6f, 0x67, 0x5f, 0x6d, 0x65, 0x73, 0x73, - 0x61, 0x67, 0x65, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x10, 0x2e, 0x72, 0x65, 0x6d, 0x6f, - 0x74, 0x65, 0x2e, 0x4c, 0x6f, 0x67, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x0a, 0x6c, 0x6f, 0x67, - 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x22, 0xbc, 0x01, 0x0a, 0x0e, 0x43, 0x68, 0x61, 0x6e, - 0x67, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x32, 0x0a, 0x05, 0x73, 0x74, - 0x61, 0x74, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x1c, 0x2e, 0x72, 0x65, 0x6d, 0x6f, - 0x74, 0x65, 0x2e, 0x43, 0x68, 0x61, 0x6e, 0x67, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, - 0x65, 0x2e, 0x53, 0x74, 0x61, 0x74, 0x65, 0x52, 0x05, 0x73, 0x74, 0x61, 0x74, 0x65, 0x12, 0x23, - 0x0a, 0x0d, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x5f, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x18, - 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x4d, 0x65, 0x73, 0x73, - 0x61, 0x67, 0x65, 0x22, 0x51, 0x0a, 0x05, 0x53, 0x74, 0x61, 0x74, 0x65, 0x12, 0x11, 0x0a, 0x0d, - 0x4e, 0x4f, 0x54, 0x5f, 0x50, 0x52, 0x4f, 0x43, 0x45, 0x53, 0x53, 0x45, 0x44, 0x10, 0x00, 0x12, - 0x0d, 0x0a, 0x09, 0x53, 0x55, 0x43, 0x43, 0x45, 0x45, 0x44, 0x45, 0x44, 0x10, 0x01, 0x12, 0x0b, - 0x0a, 0x07, 0x49, 0x4e, 0x56, 0x41, 0x4c, 0x49, 0x44, 0x10, 0x02, 0x12, 0x0a, 0x0a, 0x06, 0x46, - 0x41, 0x49, 0x4c, 0x45, 0x44, 0x10, 0x03, 0x12, 0x0d, 0x0a, 0x09, 0x54, 0x48, 0x52, 0x4f, 0x54, - 0x54, 0x4c, 0x45, 0x44, 0x10, 0x04, 0x32, 0xfe, 0x01, 0x0a, 0x0e, 0x52, 0x65, 0x6d, 0x6f, 0x74, - 0x65, 0x50, 0x72, 0x6f, 0x76, 0x69, 0x64, 0x65, 0x72, 0x12, 0x36, 0x0a, 0x05, 0x4c, 0x6f, 0x67, - 0x69, 0x6e, 0x12, 0x14, 0x2e, 0x72, 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x2e, 0x4c, 0x6f, 0x67, 0x69, - 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x15, 0x2e, 0x72, 0x65, 0x6d, 0x6f, 0x74, - 0x65, 0x2e, 0x4c, 0x6f, 0x67, 0x69, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, - 0x00, 0x12, 0x34, 0x0a, 0x08, 0x47, 0x65, 0x74, 0x5a, 0x6f, 0x6e, 0x65, 0x73, 0x12, 0x17, 0x2e, - 0x72, 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x2e, 0x47, 0x65, 0x74, 0x5a, 0x6f, 0x6e, 0x65, 0x73, 0x52, - 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x0d, 0x2e, 0x72, 
0x65, 0x6d, 0x6f, 0x74, 0x65, 0x2e, - 0x5a, 0x6f, 0x6e, 0x65, 0x73, 0x22, 0x00, 0x12, 0x40, 0x0a, 0x0c, 0x47, 0x65, 0x74, 0x5a, 0x6f, - 0x6e, 0x65, 0x53, 0x74, 0x61, 0x74, 0x65, 0x12, 0x1b, 0x2e, 0x72, 0x65, 0x6d, 0x6f, 0x74, 0x65, - 0x2e, 0x47, 0x65, 0x74, 0x5a, 0x6f, 0x6e, 0x65, 0x53, 0x74, 0x61, 0x74, 0x65, 0x52, 0x65, 0x71, - 0x75, 0x65, 0x73, 0x74, 0x1a, 0x11, 0x2e, 0x72, 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x2e, 0x5a, 0x6f, - 0x6e, 0x65, 0x53, 0x74, 0x61, 0x74, 0x65, 0x22, 0x00, 0x12, 0x3c, 0x0a, 0x07, 0x45, 0x78, 0x65, - 0x63, 0x75, 0x74, 0x65, 0x12, 0x16, 0x2e, 0x72, 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x2e, 0x45, 0x78, - 0x65, 0x63, 0x75, 0x74, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x17, 0x2e, 0x72, - 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x2e, 0x45, 0x78, 0x65, 0x63, 0x75, 0x74, 0x65, 0x52, 0x65, 0x73, - 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x42, 0x46, 0x5a, 0x44, 0x67, 0x69, 0x74, 0x68, 0x75, - 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x67, 0x61, 0x72, 0x64, 0x65, 0x6e, 0x65, 0x72, 0x2f, 0x65, - 0x78, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x2d, 0x64, 0x6e, 0x73, 0x2d, 0x6d, 0x61, 0x6e, 0x61, - 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x2f, 0x70, 0x6b, 0x67, 0x2f, 0x73, 0x65, 0x72, 0x76, 0x65, - 0x72, 0x2f, 0x72, 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x2f, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x62, - 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, + 0x65, 0x63, 0x6f, 0x72, 0x64, 0x12, 0x46, 0x0a, 0x0e, 0x72, 0x6f, 0x75, 0x74, 0x69, 0x6e, 0x67, + 0x5f, 0x70, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1f, 0x2e, + 0x72, 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x2e, 0x52, 0x65, 0x63, 0x6f, 0x72, 0x64, 0x53, 0x65, 0x74, + 0x2e, 0x52, 0x6f, 0x75, 0x74, 0x69, 0x6e, 0x67, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x52, 0x0d, + 0x72, 0x6f, 0x75, 0x74, 0x69, 0x6e, 0x67, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x1a, 0x1e, 0x0a, + 0x06, 0x52, 0x65, 0x63, 0x6f, 0x72, 0x64, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, + 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x1a, 0xb3, 0x01, + 0x0a, 0x0d, 0x52, 0x6f, 0x75, 0x74, 0x69, 0x6e, 0x67, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x12, + 0x12, 0x0a, 0x04, 0x74, 0x79, 0x70, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x74, + 0x79, 0x70, 0x65, 0x12, 0x4f, 0x0a, 0x0a, 0x70, 0x61, 0x72, 0x61, 0x6d, 0x65, 0x74, 0x65, 0x72, + 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x2f, 0x2e, 0x72, 0x65, 0x6d, 0x6f, 0x74, 0x65, + 0x2e, 0x52, 0x65, 0x63, 0x6f, 0x72, 0x64, 0x53, 0x65, 0x74, 0x2e, 0x52, 0x6f, 0x75, 0x74, 0x69, + 0x6e, 0x67, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x2e, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x65, 0x74, + 0x65, 0x72, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x0a, 0x70, 0x61, 0x72, 0x61, 0x6d, 0x65, + 0x74, 0x65, 0x72, 0x73, 0x1a, 0x3d, 0x0a, 0x0f, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x65, 0x74, 0x65, + 0x72, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, + 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, + 0x02, 0x38, 0x01, 0x22, 0xf3, 0x01, 0x0a, 0x06, 0x44, 0x4e, 0x53, 0x53, 0x65, 0x74, 0x12, 0x19, + 0x0a, 0x08, 0x64, 0x6e, 0x73, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x07, 0x64, 0x6e, 0x73, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x21, 0x0a, 0x0c, 0x75, 0x70, 0x64, + 0x61, 0x74, 0x65, 0x5f, 0x67, 0x72, 0x6f, 0x75, 0x70, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x0b, 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, 
0x47, 0x72, 0x6f, 0x75, 0x70, 0x12, 0x35, 0x0a, 0x07, + 0x72, 0x65, 0x63, 0x6f, 0x72, 0x64, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1b, 0x2e, + 0x72, 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x2e, 0x44, 0x4e, 0x53, 0x53, 0x65, 0x74, 0x2e, 0x52, 0x65, + 0x63, 0x6f, 0x72, 0x64, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x07, 0x72, 0x65, 0x63, 0x6f, + 0x72, 0x64, 0x73, 0x12, 0x25, 0x0a, 0x0e, 0x73, 0x65, 0x74, 0x5f, 0x69, 0x64, 0x65, 0x6e, 0x74, + 0x69, 0x66, 0x69, 0x65, 0x72, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0d, 0x73, 0x65, 0x74, + 0x49, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x66, 0x69, 0x65, 0x72, 0x1a, 0x4d, 0x0a, 0x0c, 0x52, 0x65, + 0x63, 0x6f, 0x72, 0x64, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, + 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x27, 0x0a, 0x05, + 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x11, 0x2e, 0x72, 0x65, + 0x6d, 0x6f, 0x74, 0x65, 0x2e, 0x52, 0x65, 0x63, 0x6f, 0x72, 0x64, 0x53, 0x65, 0x74, 0x52, 0x05, + 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, 0xc7, 0x01, 0x0a, 0x0d, 0x50, 0x61, + 0x72, 0x74, 0x69, 0x61, 0x6c, 0x44, 0x4e, 0x53, 0x53, 0x65, 0x74, 0x12, 0x19, 0x0a, 0x08, 0x64, + 0x6e, 0x73, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x64, + 0x6e, 0x73, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x21, 0x0a, 0x0c, 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, + 0x5f, 0x67, 0x72, 0x6f, 0x75, 0x70, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x75, 0x70, + 0x64, 0x61, 0x74, 0x65, 0x47, 0x72, 0x6f, 0x75, 0x70, 0x12, 0x1f, 0x0a, 0x0b, 0x72, 0x65, 0x63, + 0x6f, 0x72, 0x64, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, + 0x72, 0x65, 0x63, 0x6f, 0x72, 0x64, 0x54, 0x79, 0x70, 0x65, 0x12, 0x30, 0x0a, 0x0a, 0x72, 0x65, + 0x63, 0x6f, 0x72, 0x64, 0x5f, 0x73, 0x65, 0x74, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x11, + 0x2e, 0x72, 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x2e, 0x52, 0x65, 0x63, 0x6f, 0x72, 0x64, 0x53, 0x65, + 0x74, 0x52, 0x09, 0x72, 0x65, 0x63, 0x6f, 0x72, 0x64, 0x53, 0x65, 0x74, 0x12, 0x25, 0x0a, 0x0e, + 0x73, 0x65, 0x74, 0x5f, 0x69, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x66, 0x69, 0x65, 0x72, 0x18, 0x05, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x0d, 0x73, 0x65, 0x74, 0x49, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x66, + 0x69, 0x65, 0x72, 0x22, 0xa4, 0x01, 0x0a, 0x09, 0x5a, 0x6f, 0x6e, 0x65, 0x53, 0x74, 0x61, 0x74, + 0x65, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, + 0x6b, 0x65, 0x79, 0x12, 0x39, 0x0a, 0x08, 0x64, 0x6e, 0x73, 0x5f, 0x73, 0x65, 0x74, 0x73, 0x18, + 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1e, 0x2e, 0x72, 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x2e, 0x5a, + 0x6f, 0x6e, 0x65, 0x53, 0x74, 0x61, 0x74, 0x65, 0x2e, 0x44, 0x6e, 0x73, 0x53, 0x65, 0x74, 0x73, + 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x07, 0x64, 0x6e, 0x73, 0x53, 0x65, 0x74, 0x73, 0x1a, 0x4a, + 0x0a, 0x0c, 0x44, 0x6e, 0x73, 0x53, 0x65, 0x74, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, + 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, + 0x12, 0x24, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, + 0x0e, 0x2e, 0x72, 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x2e, 0x44, 0x4e, 0x53, 0x53, 0x65, 0x74, 0x52, + 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, 0x7c, 0x0a, 0x0e, 0x45, 0x78, + 0x65, 0x63, 0x75, 0x74, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x14, 0x0a, 0x05, + 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 
0x52, 0x05, 0x74, 0x6f, 0x6b, + 0x65, 0x6e, 0x12, 0x16, 0x0a, 0x06, 0x7a, 0x6f, 0x6e, 0x65, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x06, 0x7a, 0x6f, 0x6e, 0x65, 0x69, 0x64, 0x12, 0x3c, 0x0a, 0x0e, 0x63, 0x68, + 0x61, 0x6e, 0x67, 0x65, 0x5f, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x18, 0x03, 0x20, 0x03, + 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x72, 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x2e, 0x43, 0x68, 0x61, 0x6e, + 0x67, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x52, 0x0d, 0x63, 0x68, 0x61, 0x6e, 0x67, + 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x22, 0xaa, 0x01, 0x0a, 0x0d, 0x43, 0x68, 0x61, + 0x6e, 0x67, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x38, 0x0a, 0x06, 0x61, 0x63, + 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x20, 0x2e, 0x72, 0x65, 0x6d, + 0x6f, 0x74, 0x65, 0x2e, 0x43, 0x68, 0x61, 0x6e, 0x67, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, + 0x74, 0x2e, 0x41, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x54, 0x79, 0x70, 0x65, 0x52, 0x06, 0x61, 0x63, + 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x2d, 0x0a, 0x06, 0x63, 0x68, 0x61, 0x6e, 0x67, 0x65, 0x18, 0x03, + 0x20, 0x01, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x72, 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x2e, 0x50, 0x61, + 0x72, 0x74, 0x69, 0x61, 0x6c, 0x44, 0x4e, 0x53, 0x53, 0x65, 0x74, 0x52, 0x06, 0x63, 0x68, 0x61, + 0x6e, 0x67, 0x65, 0x22, 0x30, 0x0a, 0x0a, 0x41, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x54, 0x79, 0x70, + 0x65, 0x12, 0x0a, 0x0a, 0x06, 0x43, 0x52, 0x45, 0x41, 0x54, 0x45, 0x10, 0x00, 0x12, 0x0a, 0x0a, + 0x06, 0x55, 0x50, 0x44, 0x41, 0x54, 0x45, 0x10, 0x01, 0x12, 0x0a, 0x0a, 0x06, 0x44, 0x45, 0x4c, + 0x45, 0x54, 0x45, 0x10, 0x02, 0x22, 0xa3, 0x01, 0x0a, 0x08, 0x4c, 0x6f, 0x67, 0x45, 0x6e, 0x74, + 0x72, 0x79, 0x12, 0x1c, 0x0a, 0x09, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x18, + 0x01, 0x20, 0x01, 0x28, 0x03, 0x52, 0x09, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, + 0x12, 0x2c, 0x0a, 0x05, 0x6c, 0x65, 0x76, 0x65, 0x6c, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0e, 0x32, + 0x16, 0x2e, 0x72, 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x2e, 0x4c, 0x6f, 0x67, 0x45, 0x6e, 0x74, 0x72, + 0x79, 0x2e, 0x4c, 0x65, 0x76, 0x65, 0x6c, 0x52, 0x05, 0x6c, 0x65, 0x76, 0x65, 0x6c, 0x12, 0x18, + 0x0a, 0x07, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x07, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x22, 0x31, 0x0a, 0x05, 0x4c, 0x65, 0x76, 0x65, + 0x6c, 0x12, 0x09, 0x0a, 0x05, 0x44, 0x45, 0x42, 0x55, 0x47, 0x10, 0x00, 0x12, 0x08, 0x0a, 0x04, + 0x49, 0x4e, 0x46, 0x4f, 0x10, 0x01, 0x12, 0x08, 0x0a, 0x04, 0x57, 0x41, 0x52, 0x4e, 0x10, 0x02, + 0x12, 0x09, 0x0a, 0x05, 0x45, 0x52, 0x52, 0x4f, 0x52, 0x10, 0x03, 0x22, 0x85, 0x01, 0x0a, 0x0f, + 0x45, 0x78, 0x65, 0x63, 0x75, 0x74, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, + 0x3f, 0x0a, 0x0f, 0x63, 0x68, 0x61, 0x6e, 0x67, 0x65, 0x5f, 0x72, 0x65, 0x73, 0x70, 0x6f, 0x6e, + 0x73, 0x65, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x16, 0x2e, 0x72, 0x65, 0x6d, 0x6f, 0x74, + 0x65, 0x2e, 0x43, 0x68, 0x61, 0x6e, 0x67, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, + 0x52, 0x0e, 0x63, 0x68, 0x61, 0x6e, 0x67, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, + 0x12, 0x31, 0x0a, 0x0b, 0x6c, 0x6f, 0x67, 0x5f, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x18, + 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x10, 0x2e, 0x72, 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x2e, 0x4c, + 0x6f, 0x67, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x0a, 0x6c, 0x6f, 0x67, 0x4d, 0x65, 0x73, 0x73, + 0x61, 0x67, 0x65, 0x22, 0xbc, 0x01, 0x0a, 0x0e, 0x43, 0x68, 0x61, 0x6e, 0x67, 0x65, 0x52, 
0x65, + 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x32, 0x0a, 0x05, 0x73, 0x74, 0x61, 0x74, 0x65, 0x18, + 0x01, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x1c, 0x2e, 0x72, 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x2e, 0x43, + 0x68, 0x61, 0x6e, 0x67, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x2e, 0x53, 0x74, + 0x61, 0x74, 0x65, 0x52, 0x05, 0x73, 0x74, 0x61, 0x74, 0x65, 0x12, 0x23, 0x0a, 0x0d, 0x65, 0x72, + 0x72, 0x6f, 0x72, 0x5f, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, + 0x09, 0x52, 0x0c, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x22, + 0x51, 0x0a, 0x05, 0x53, 0x74, 0x61, 0x74, 0x65, 0x12, 0x11, 0x0a, 0x0d, 0x4e, 0x4f, 0x54, 0x5f, + 0x50, 0x52, 0x4f, 0x43, 0x45, 0x53, 0x53, 0x45, 0x44, 0x10, 0x00, 0x12, 0x0d, 0x0a, 0x09, 0x53, + 0x55, 0x43, 0x43, 0x45, 0x45, 0x44, 0x45, 0x44, 0x10, 0x01, 0x12, 0x0b, 0x0a, 0x07, 0x49, 0x4e, + 0x56, 0x41, 0x4c, 0x49, 0x44, 0x10, 0x02, 0x12, 0x0a, 0x0a, 0x06, 0x46, 0x41, 0x49, 0x4c, 0x45, + 0x44, 0x10, 0x03, 0x12, 0x0d, 0x0a, 0x09, 0x54, 0x48, 0x52, 0x4f, 0x54, 0x54, 0x4c, 0x45, 0x44, + 0x10, 0x04, 0x32, 0xfe, 0x01, 0x0a, 0x0e, 0x52, 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x50, 0x72, 0x6f, + 0x76, 0x69, 0x64, 0x65, 0x72, 0x12, 0x36, 0x0a, 0x05, 0x4c, 0x6f, 0x67, 0x69, 0x6e, 0x12, 0x14, + 0x2e, 0x72, 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x2e, 0x4c, 0x6f, 0x67, 0x69, 0x6e, 0x52, 0x65, 0x71, + 0x75, 0x65, 0x73, 0x74, 0x1a, 0x15, 0x2e, 0x72, 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x2e, 0x4c, 0x6f, + 0x67, 0x69, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x34, 0x0a, + 0x08, 0x47, 0x65, 0x74, 0x5a, 0x6f, 0x6e, 0x65, 0x73, 0x12, 0x17, 0x2e, 0x72, 0x65, 0x6d, 0x6f, + 0x74, 0x65, 0x2e, 0x47, 0x65, 0x74, 0x5a, 0x6f, 0x6e, 0x65, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, + 0x73, 0x74, 0x1a, 0x0d, 0x2e, 0x72, 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x2e, 0x5a, 0x6f, 0x6e, 0x65, + 0x73, 0x22, 0x00, 0x12, 0x40, 0x0a, 0x0c, 0x47, 0x65, 0x74, 0x5a, 0x6f, 0x6e, 0x65, 0x53, 0x74, + 0x61, 0x74, 0x65, 0x12, 0x1b, 0x2e, 0x72, 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x2e, 0x47, 0x65, 0x74, + 0x5a, 0x6f, 0x6e, 0x65, 0x53, 0x74, 0x61, 0x74, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, + 0x1a, 0x11, 0x2e, 0x72, 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x2e, 0x5a, 0x6f, 0x6e, 0x65, 0x53, 0x74, + 0x61, 0x74, 0x65, 0x22, 0x00, 0x12, 0x3c, 0x0a, 0x07, 0x45, 0x78, 0x65, 0x63, 0x75, 0x74, 0x65, + 0x12, 0x16, 0x2e, 0x72, 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x2e, 0x45, 0x78, 0x65, 0x63, 0x75, 0x74, + 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x17, 0x2e, 0x72, 0x65, 0x6d, 0x6f, 0x74, + 0x65, 0x2e, 0x45, 0x78, 0x65, 0x63, 0x75, 0x74, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, + 0x65, 0x22, 0x00, 0x42, 0x46, 0x5a, 0x44, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, + 0x6d, 0x2f, 0x67, 0x61, 0x72, 0x64, 0x65, 0x6e, 0x65, 0x72, 0x2f, 0x65, 0x78, 0x74, 0x65, 0x72, + 0x6e, 0x61, 0x6c, 0x2d, 0x64, 0x6e, 0x73, 0x2d, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65, + 0x6e, 0x74, 0x2f, 0x70, 0x6b, 0x67, 0x2f, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x2f, 0x72, 0x65, + 0x6d, 0x6f, 0x74, 0x65, 0x2f, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x62, 0x06, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x33, } var ( @@ -1266,58 +1366,62 @@ func file_pkg_server_remote_common_remote_proto_rawDescGZIP() []byte { } var file_pkg_server_remote_common_remote_proto_enumTypes = make([]protoimpl.EnumInfo, 3) -var file_pkg_server_remote_common_remote_proto_msgTypes = make([]protoimpl.MessageInfo, 18) +var file_pkg_server_remote_common_remote_proto_msgTypes = make([]protoimpl.MessageInfo, 20) var 
file_pkg_server_remote_common_remote_proto_goTypes = []interface{}{ - (ChangeRequest_ActionType)(0), // 0: remote.ChangeRequest.ActionType - (LogEntry_Level)(0), // 1: remote.LogEntry.Level - (ChangeResponse_State)(0), // 2: remote.ChangeResponse.State - (*LoginRequest)(nil), // 3: remote.LoginRequest - (*LoginResponse)(nil), // 4: remote.LoginResponse - (*GetZonesRequest)(nil), // 5: remote.GetZonesRequest - (*Zones)(nil), // 6: remote.Zones - (*Zone)(nil), // 7: remote.Zone - (*GetZoneStateRequest)(nil), // 8: remote.GetZoneStateRequest - (*RecordSet)(nil), // 9: remote.RecordSet - (*DNSSet)(nil), // 10: remote.DNSSet - (*PartialDNSSet)(nil), // 11: remote.PartialDNSSet - (*ZoneState)(nil), // 12: remote.ZoneState - (*ExecuteRequest)(nil), // 13: remote.ExecuteRequest - (*ChangeRequest)(nil), // 14: remote.ChangeRequest - (*LogEntry)(nil), // 15: remote.LogEntry - (*ExecuteResponse)(nil), // 16: remote.ExecuteResponse - (*ChangeResponse)(nil), // 17: remote.ChangeResponse - (*RecordSet_Record)(nil), // 18: remote.RecordSet.Record - nil, // 19: remote.DNSSet.RecordsEntry - nil, // 20: remote.ZoneState.DnsSetsEntry + (ChangeRequest_ActionType)(0), // 0: remote.ChangeRequest.ActionType + (LogEntry_Level)(0), // 1: remote.LogEntry.Level + (ChangeResponse_State)(0), // 2: remote.ChangeResponse.State + (*LoginRequest)(nil), // 3: remote.LoginRequest + (*LoginResponse)(nil), // 4: remote.LoginResponse + (*GetZonesRequest)(nil), // 5: remote.GetZonesRequest + (*Zones)(nil), // 6: remote.Zones + (*Zone)(nil), // 7: remote.Zone + (*GetZoneStateRequest)(nil), // 8: remote.GetZoneStateRequest + (*RecordSet)(nil), // 9: remote.RecordSet + (*DNSSet)(nil), // 10: remote.DNSSet + (*PartialDNSSet)(nil), // 11: remote.PartialDNSSet + (*ZoneState)(nil), // 12: remote.ZoneState + (*ExecuteRequest)(nil), // 13: remote.ExecuteRequest + (*ChangeRequest)(nil), // 14: remote.ChangeRequest + (*LogEntry)(nil), // 15: remote.LogEntry + (*ExecuteResponse)(nil), // 16: remote.ExecuteResponse + (*ChangeResponse)(nil), // 17: remote.ChangeResponse + (*RecordSet_Record)(nil), // 18: remote.RecordSet.Record + (*RecordSet_RoutingPolicy)(nil), // 19: remote.RecordSet.RoutingPolicy + nil, // 20: remote.RecordSet.RoutingPolicy.ParametersEntry + nil, // 21: remote.DNSSet.RecordsEntry + nil, // 22: remote.ZoneState.DnsSetsEntry } var file_pkg_server_remote_common_remote_proto_depIdxs = []int32{ 7, // 0: remote.Zones.zone:type_name -> remote.Zone 18, // 1: remote.RecordSet.record:type_name -> remote.RecordSet.Record - 19, // 2: remote.DNSSet.records:type_name -> remote.DNSSet.RecordsEntry - 9, // 3: remote.PartialDNSSet.record_set:type_name -> remote.RecordSet - 20, // 4: remote.ZoneState.dns_sets:type_name -> remote.ZoneState.DnsSetsEntry - 14, // 5: remote.ExecuteRequest.change_request:type_name -> remote.ChangeRequest - 0, // 6: remote.ChangeRequest.action:type_name -> remote.ChangeRequest.ActionType - 11, // 7: remote.ChangeRequest.change:type_name -> remote.PartialDNSSet - 1, // 8: remote.LogEntry.level:type_name -> remote.LogEntry.Level - 17, // 9: remote.ExecuteResponse.change_response:type_name -> remote.ChangeResponse - 15, // 10: remote.ExecuteResponse.log_message:type_name -> remote.LogEntry - 2, // 11: remote.ChangeResponse.state:type_name -> remote.ChangeResponse.State - 9, // 12: remote.DNSSet.RecordsEntry.value:type_name -> remote.RecordSet - 10, // 13: remote.ZoneState.DnsSetsEntry.value:type_name -> remote.DNSSet - 3, // 14: remote.RemoteProvider.Login:input_type -> remote.LoginRequest - 5, // 15: 
remote.RemoteProvider.GetZones:input_type -> remote.GetZonesRequest - 8, // 16: remote.RemoteProvider.GetZoneState:input_type -> remote.GetZoneStateRequest - 13, // 17: remote.RemoteProvider.Execute:input_type -> remote.ExecuteRequest - 4, // 18: remote.RemoteProvider.Login:output_type -> remote.LoginResponse - 6, // 19: remote.RemoteProvider.GetZones:output_type -> remote.Zones - 12, // 20: remote.RemoteProvider.GetZoneState:output_type -> remote.ZoneState - 16, // 21: remote.RemoteProvider.Execute:output_type -> remote.ExecuteResponse - 18, // [18:22] is the sub-list for method output_type - 14, // [14:18] is the sub-list for method input_type - 14, // [14:14] is the sub-list for extension type_name - 14, // [14:14] is the sub-list for extension extendee - 0, // [0:14] is the sub-list for field type_name + 19, // 2: remote.RecordSet.routing_policy:type_name -> remote.RecordSet.RoutingPolicy + 21, // 3: remote.DNSSet.records:type_name -> remote.DNSSet.RecordsEntry + 9, // 4: remote.PartialDNSSet.record_set:type_name -> remote.RecordSet + 22, // 5: remote.ZoneState.dns_sets:type_name -> remote.ZoneState.DnsSetsEntry + 14, // 6: remote.ExecuteRequest.change_request:type_name -> remote.ChangeRequest + 0, // 7: remote.ChangeRequest.action:type_name -> remote.ChangeRequest.ActionType + 11, // 8: remote.ChangeRequest.change:type_name -> remote.PartialDNSSet + 1, // 9: remote.LogEntry.level:type_name -> remote.LogEntry.Level + 17, // 10: remote.ExecuteResponse.change_response:type_name -> remote.ChangeResponse + 15, // 11: remote.ExecuteResponse.log_message:type_name -> remote.LogEntry + 2, // 12: remote.ChangeResponse.state:type_name -> remote.ChangeResponse.State + 20, // 13: remote.RecordSet.RoutingPolicy.parameters:type_name -> remote.RecordSet.RoutingPolicy.ParametersEntry + 9, // 14: remote.DNSSet.RecordsEntry.value:type_name -> remote.RecordSet + 10, // 15: remote.ZoneState.DnsSetsEntry.value:type_name -> remote.DNSSet + 3, // 16: remote.RemoteProvider.Login:input_type -> remote.LoginRequest + 5, // 17: remote.RemoteProvider.GetZones:input_type -> remote.GetZonesRequest + 8, // 18: remote.RemoteProvider.GetZoneState:input_type -> remote.GetZoneStateRequest + 13, // 19: remote.RemoteProvider.Execute:input_type -> remote.ExecuteRequest + 4, // 20: remote.RemoteProvider.Login:output_type -> remote.LoginResponse + 6, // 21: remote.RemoteProvider.GetZones:output_type -> remote.Zones + 12, // 22: remote.RemoteProvider.GetZoneState:output_type -> remote.ZoneState + 16, // 23: remote.RemoteProvider.Execute:output_type -> remote.ExecuteResponse + 20, // [20:24] is the sub-list for method output_type + 16, // [16:20] is the sub-list for method input_type + 16, // [16:16] is the sub-list for extension type_name + 16, // [16:16] is the sub-list for extension extendee + 0, // [0:16] is the sub-list for field type_name } func init() { file_pkg_server_remote_common_remote_proto_init() } @@ -1518,6 +1622,18 @@ func file_pkg_server_remote_common_remote_proto_init() { return nil } } + file_pkg_server_remote_common_remote_proto_msgTypes[16].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*RecordSet_RoutingPolicy); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } } type x struct{} out := protoimpl.TypeBuilder{ @@ -1525,7 +1641,7 @@ func file_pkg_server_remote_common_remote_proto_init() { GoPackagePath: reflect.TypeOf(x{}).PkgPath(), RawDescriptor: file_pkg_server_remote_common_remote_proto_rawDesc, 
NumEnums: 3, - NumMessages: 18, + NumMessages: 20, NumExtensions: 0, NumServices: 1, }, diff --git a/pkg/server/remote/common/remote.proto b/pkg/server/remote/common/remote.proto index 9afb87a2e..eb14822f6 100644 --- a/pkg/server/remote/common/remote.proto +++ b/pkg/server/remote/common/remote.proto @@ -51,15 +51,22 @@ message RecordSet { string value = 1; } + message RoutingPolicy { + string type = 1; + map parameters = 2; + } + string type = 1; int32 ttl = 2; repeated Record record = 3; + RoutingPolicy routing_policy = 4; } message DNSSet { string dns_name = 1; string update_group = 2; map records = 3; + string set_identifier = 4; } message PartialDNSSet { @@ -67,6 +74,7 @@ message PartialDNSSet { string update_group = 2; string record_type = 3; RecordSet record_set = 4; + string set_identifier = 5; } message ZoneState { diff --git a/pkg/server/remote/conversion/conversion.go b/pkg/server/remote/conversion/conversion.go index 28651af90..e4b5b09b3 100644 --- a/pkg/server/remote/conversion/conversion.go +++ b/pkg/server/remote/conversion/conversion.go @@ -18,6 +18,7 @@ package conversion import ( "fmt" + "strings" "github.com/gardener/external-dns-management/pkg/dns" "github.com/gardener/external-dns-management/pkg/dns/provider" @@ -27,16 +28,33 @@ import ( func MarshalDNSSets(local dns.DNSSets) common.DNSSets { result := common.DNSSets{} for name, dnsset := range local { - result[name] = MarshalDNSSet(dnsset) + result[marshalRecordSetName(name)] = MarshalDNSSet(dnsset) } return result } +func marshalRecordSetName(name dns.RecordSetName) string { + if name.SetIdentifier == "" { + return name.DNSName + } + return name.DNSName + "\t" + name.SetIdentifier +} + +func unmarshalRecordSetName(marshalledName string) dns.RecordSetName { + parts := strings.Split(marshalledName, "\t") + setIdentifier := "" + if len(parts) == 2 { + setIdentifier = parts[1] + } + return dns.RecordSetName{DNSName: parts[0], SetIdentifier: setIdentifier} +} + func MarshalDNSSet(local *dns.DNSSet) *common.DNSSet { remote := &common.DNSSet{ - DnsName: local.Name, - UpdateGroup: local.UpdateGroup, - Records: map[string]*common.RecordSet{}, + DnsName: local.Name.DNSName, + SetIdentifier: local.Name.SetIdentifier, + UpdateGroup: local.UpdateGroup, + Records: map[string]*common.RecordSet{}, } for typ, rs := range local.Sets { remote.Records[typ] = MarshalRecordSet(rs) @@ -46,8 +64,9 @@ func MarshalDNSSet(local *dns.DNSSet) *common.DNSSet { func MarshalRecordSet(local *dns.RecordSet) *common.RecordSet { remote := &common.RecordSet{ - Type: local.Type, - Ttl: int32(local.TTL), + Type: local.Type, + Ttl: int32(local.TTL), + RoutingPolicy: MarshalRoutingPolicy(local.RoutingPolicy), } for _, v := range local.Records { remote.Record = append(remote.Record, &common.RecordSet_Record{Value: v.Value}) @@ -55,25 +74,40 @@ func MarshalRecordSet(local *dns.RecordSet) *common.RecordSet { return remote } +func MarshalRoutingPolicy(local *dns.RoutingPolicy) *common.RecordSet_RoutingPolicy { + if local == nil { + return nil + } + params := map[string]string{} + for k, v := range local.Parameters { + params[k] = v + } + return &common.RecordSet_RoutingPolicy{ + Type: local.Type, + Parameters: params, + } +} + func MarshalPartialDNSSet(local *dns.DNSSet, recordType string) *common.PartialDNSSet { return &common.PartialDNSSet{ - DnsName: local.Name, - UpdateGroup: local.UpdateGroup, - RecordType: recordType, - RecordSet: MarshalRecordSet(local.Sets[recordType]), + DnsName: local.Name.DNSName, + SetIdentifier: local.Name.SetIdentifier, + 
UpdateGroup: local.UpdateGroup, + RecordType: recordType, + RecordSet: MarshalRecordSet(local.Sets[recordType]), } } func UnmarshalDNSSets(remote common.DNSSets) dns.DNSSets { local := dns.DNSSets{} for name, set := range remote { - local[name] = UnmarshalDNSSet(set) + local[unmarshalRecordSetName(name)] = UnmarshalDNSSet(set) } return local } func UnmarshalDNSSet(remote *common.DNSSet) *dns.DNSSet { - local := dns.NewDNSSet(remote.DnsName) + local := dns.NewDNSSet(dns.RecordSetName{DNSName: remote.DnsName, SetIdentifier: remote.SetIdentifier}) local.UpdateGroup = remote.UpdateGroup for typ, rs := range remote.Records { @@ -84,14 +118,29 @@ func UnmarshalDNSSet(remote *common.DNSSet) *dns.DNSSet { func UnmarshalRecordSet(rs *common.RecordSet) *dns.RecordSet { local := dns.NewRecordSet(rs.Type, int64(rs.Ttl), nil) + local.RoutingPolicy = UnmarshalRoutingPolicy(rs.RoutingPolicy) for _, v := range rs.Record { local.Add(&dns.Record{Value: v.Value}) } return local } +func UnmarshalRoutingPolicy(policy *common.RecordSet_RoutingPolicy) *dns.RoutingPolicy { + if policy == nil { + return nil + } + params := map[string]string{} + for k, v := range policy.Parameters { + params[k] = v + } + return &dns.RoutingPolicy{ + Type: policy.Type, + Parameters: params, + } +} + func UnmarshalPartialDNSSet(remote *common.PartialDNSSet) *dns.DNSSet { - local := dns.NewDNSSet(remote.DnsName) + local := dns.NewDNSSet(dns.RecordSetName{DNSName: remote.DnsName, SetIdentifier: remote.SetIdentifier}) local.UpdateGroup = remote.UpdateGroup local.Sets[remote.RecordType] = UnmarshalRecordSet(remote.RecordSet) diff --git a/pkg/server/remote/conversion/conversion_test.go b/pkg/server/remote/conversion/conversion_test.go index 734224cd6..dbae57576 100644 --- a/pkg/server/remote/conversion/conversion_test.go +++ b/pkg/server/remote/conversion/conversion_test.go @@ -27,9 +27,19 @@ import ( func TestMarshalDNSSets(t *testing.T) { sets1 := dns.DNSSets{} rsb := dns.NewRecordSet(dns.RS_A, 100, []*dns.Record{{Value: "1.1.1.1"}, {Value: "1.1.1.2"}}) - rsc := dns.NewRecordSet(dns.RS_TXT, 200, []*dns.Record{{Value: "foo"}, {Value: "bar"}}) - sets1.AddRecordSet("b.a", rsb) - sets1.AddRecordSet("c.a", rsc) + rsc1 := dns.NewRecordSet(dns.RS_TXT, 200, []*dns.Record{{Value: "foo"}, {Value: "bar"}}) + rsc1.RoutingPolicy = &dns.RoutingPolicy{ + Type: "weighted", + Parameters: map[string]string{"weight": "1"}, + } + rsc2 := dns.NewRecordSet(dns.RS_TXT, 200, []*dns.Record{{Value: "foo"}, {Value: "bla"}}) + rsc2.RoutingPolicy = &dns.RoutingPolicy{ + Type: "weighted", + Parameters: map[string]string{"weight": "2"}, + } + sets1.AddRecordSet(dns.RecordSetName{DNSName: "b.a"}, rsb) + sets1.AddRecordSet(dns.RecordSetName{DNSName: "c.a", SetIdentifier: "id1"}, rsc1) + sets1.AddRecordSet(dns.RecordSetName{DNSName: "c.a", SetIdentifier: "id2"}, rsc2) table := []struct { name string sets dns.DNSSets @@ -49,18 +59,35 @@ func TestMarshalDNSSets(t *testing.T) { } func TestMarshalChangeRequest(t *testing.T) { - set := dns.NewDNSSet("a.b") + doTestMarshalChangeRequest(t, false) +} + +func TestMarshalChangeRequestWithRoutingPolicy(t *testing.T) { + doTestMarshalChangeRequest(t, false) +} + +func doTestMarshalChangeRequest(t *testing.T, withPolicy bool) { + var routingPolicy *dns.RoutingPolicy + setIdentifier := "" + if withPolicy { + setIdentifier = "id1" + routingPolicy = &dns.RoutingPolicy{ + Type: dns.RoutingPolicyWeighted, + Parameters: map[string]string{"weight": "100"}, + } + } + set := dns.NewDNSSet(dns.RecordSetName{DNSName: "b.a", SetIdentifier: 
setIdentifier}) set.UpdateGroup = "group1" - set.SetMetaAttr(dns.ATTR_OWNER, "owner1") - set.SetMetaAttr(dns.ATTR_PREFIX, "comment-") - set.SetRecordSet(dns.RS_A, 100, "1.1.1.1", "1.1.1.2") + set.SetMetaAttr(dns.ATTR_OWNER, "owner1", routingPolicy) + set.SetMetaAttr(dns.ATTR_PREFIX, "comment-", routingPolicy) + set.SetRecordSet(dns.RS_A, 100, routingPolicy, "1.1.1.1", "1.1.1.2") table := []struct { name string request *provider.ChangeRequest }{ - {"create", provider.NewChangeRequest(provider.R_CREATE, dns.RS_A, nil, set, nil)}, - {"update", provider.NewChangeRequest(provider.R_UPDATE, dns.RS_META, nil, set, nil)}, - {"delete", provider.NewChangeRequest(provider.R_DELETE, dns.RS_A, set, nil, nil)}, + {"create", provider.NewChangeRequest(provider.R_CREATE, dns.RS_A, nil, set, nil, routingPolicy)}, + {"update", provider.NewChangeRequest(provider.R_UPDATE, dns.RS_META, nil, set, nil, routingPolicy)}, + {"delete", provider.NewChangeRequest(provider.R_DELETE, dns.RS_A, set, nil, nil, routingPolicy)}, } for _, item := range table { @@ -84,7 +111,8 @@ func TestMarshalChangeRequest(t *testing.T) { del = item.request.Deletion.Clone() del.Sets = map[string]*dns.RecordSet{item.request.Type: del.Sets[item.request.Type]} } - expected := provider.NewChangeRequest(item.request.Action, item.request.Type, del, add, item.request.Done) + expected := provider.NewChangeRequest(item.request.Action, item.request.Type, del, add, item.request.Done, routingPolicy) + expected.Done = nil if !reflect.DeepEqual(expected, copy) { t.Errorf("change request mismatch: %s", item.name) } diff --git a/test/integration/testenv.go b/test/integration/testenv.go index 1bbea8295..d28b990cc 100644 --- a/test/integration/testenv.go +++ b/test/integration/testenv.go @@ -792,7 +792,7 @@ func (te *TestEnv) MockInMemoryGetDNSSetEx(name, zonePrefix, dnsName string) (*d if err != nil { return nil, err } - if set := state.GetDNSSets()[dnsName]; set != nil { + if set := state.GetDNSSets()[dns.RecordSetName{DNSName: dnsName}]; set != nil { return set, nil } } From 78a4324d82ef1dcc425d457499b60b226e80f50b Mon Sep 17 00:00:00 2001 From: Martin Weindel Date: Tue, 12 Jul 2022 16:28:22 +0200 Subject: [PATCH 2/7] safe guards for old remote protocol without routing policy support --- .../crds/dns.gardener.cloud_dnsentries.yaml | 2 +- pkg/apis/dns/crds/zz_generated_crds.go | 6 +- pkg/controller/provider/remote/handler.go | 39 +- pkg/server/remote/common/remote.pb.go | 346 ++++++++++-------- pkg/server/remote/common/remote.proto | 4 +- pkg/server/remote/common/types.go | 6 + pkg/server/remote/conversion/conversion.go | 7 +- .../remote/conversion/conversion_test.go | 20 +- pkg/server/remote/server.go | 26 +- pkg/server/remote/state.go | 18 +- 10 files changed, 267 insertions(+), 207 deletions(-) diff --git a/pkg/apis/dns/crds/dns.gardener.cloud_dnsentries.yaml b/pkg/apis/dns/crds/dns.gardener.cloud_dnsentries.yaml index 4a0233677..83db71029 100644 --- a/pkg/apis/dns/crds/dns.gardener.cloud_dnsentries.yaml +++ b/pkg/apis/dns/crds/dns.gardener.cloud_dnsentries.yaml @@ -103,7 +103,7 @@ spec: - name type: object routingPolicy: - description: optional routing policy like weighted, geolocation,... 
+ description: optional routing policy properties: parameters: additionalProperties: diff --git a/pkg/apis/dns/crds/zz_generated_crds.go b/pkg/apis/dns/crds/zz_generated_crds.go index 8f72df450..b606e07c5 100644 --- a/pkg/apis/dns/crds/zz_generated_crds.go +++ b/pkg/apis/dns/crds/zz_generated_crds.go @@ -241,7 +241,7 @@ spec: - name type: object routingPolicy: - description: optional routing policy like weighted, geolocation,... + description: optional routing policy properties: parameters: additionalProperties: @@ -253,7 +253,7 @@ spec: type: string type: description: Policy is the policy type. Allowed values are provider - dependent, e.g. `+"`"+`weighted`+"`"+` + dependent, e.g. ` + "`" + `weighted` + "`" + ` type: string required: - parameters @@ -310,7 +310,7 @@ spec: type: string type: description: Policy is the policy type. Allowed values are provider - dependent, e.g. `+"`"+`weighted`+"`"+` + dependent, e.g. ` + "`" + `weighted` + "`" + ` type: string required: - parameters diff --git a/pkg/controller/provider/remote/handler.go b/pkg/controller/provider/remote/handler.go index 1371f550c..3194c73a5 100644 --- a/pkg/controller/provider/remote/handler.go +++ b/pkg/controller/provider/remote/handler.go @@ -41,15 +41,16 @@ import ( type Handler struct { provider.DefaultDNSHandler - config provider.DNSHandlerConfig - cache provider.ZoneCache - clientID string - remoteNamespace string - currentToken string - connection *grpc.ClientConn - client common.RemoteProviderClient - sess *session.Session - r53 *route53.Route53 + config provider.DNSHandlerConfig + cache provider.ZoneCache + clientID string + remoteNamespace string + currentToken string + serverProtocolVersion int32 + connection *grpc.ClientConn + client common.RemoteProviderClient + sess *session.Session + r53 *route53.Route53 } var _ provider.DNSHandler = &Handler{} @@ -160,8 +161,9 @@ func (h *Handler) GetZones() (provider.DNSHostedZones, error) { func (h *Handler) login(ctx context.Context) error { h.config.RateLimiter.Accept() response, err := h.client.Login(ctx, &common.LoginRequest{ - Namespace: h.remoteNamespace, - CliendID: h.clientID, + Namespace: h.remoteNamespace, + CliendID: h.clientID, + ClientProtocolVersion: common.ProtocolVersion1, }) if err != nil { if s, ok := status.FromError(err); ok { @@ -174,6 +176,7 @@ func (h *Handler) login(ctx context.Context) error { return err } h.currentToken = response.Token + h.serverProtocolVersion = response.ServerProtocolVersion return nil } @@ -266,9 +269,21 @@ func (h *Handler) executeRequests(logger logger.LogContext, zone provider.DNSHos var changeRequests []*common.ChangeRequest for _, req := range reqs { + if req.RoutingPolicy != nil && h.serverProtocolVersion != common.ProtocolVersion1 { + err := fmt.Errorf("routing policy not supported by remote server version") + logger.Warnf("%s", err) + if req.Done != nil { + req.Done.Failed(err) + } + continue + } change, err := conversion.MarshalChangeRequest(req) if err != nil { - return err + logger.Warnf("marshal failed: %s", err) + if req.Done != nil { + req.Done.SetInvalid(err) + } + continue } changeRequests = append(changeRequests, change) diff --git a/pkg/server/remote/common/remote.pb.go b/pkg/server/remote/common/remote.pb.go index ffe639b4d..6f813d6c2 100644 --- a/pkg/server/remote/common/remote.pb.go +++ b/pkg/server/remote/common/remote.pb.go @@ -181,8 +181,9 @@ type LoginRequest struct { sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields - Namespace string `protobuf:"bytes,1,opt,name=namespace,proto3" 
json:"namespace,omitempty"` - CliendID string `protobuf:"bytes,2,opt,name=cliendID,proto3" json:"cliendID,omitempty"` + Namespace string `protobuf:"bytes,1,opt,name=namespace,proto3" json:"namespace,omitempty"` + CliendID string `protobuf:"bytes,2,opt,name=cliendID,proto3" json:"cliendID,omitempty"` + ClientProtocolVersion int32 `protobuf:"varint,3,opt,name=clientProtocolVersion,proto3" json:"clientProtocolVersion,omitempty"` } func (x *LoginRequest) Reset() { @@ -231,12 +232,20 @@ func (x *LoginRequest) GetCliendID() string { return "" } +func (x *LoginRequest) GetClientProtocolVersion() int32 { + if x != nil { + return x.ClientProtocolVersion + } + return 0 +} + type LoginResponse struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields - Token string `protobuf:"bytes,1,opt,name=token,proto3" json:"token,omitempty"` + Token string `protobuf:"bytes,1,opt,name=token,proto3" json:"token,omitempty"` + ServerProtocolVersion int32 `protobuf:"varint,2,opt,name=serverProtocolVersion,proto3" json:"serverProtocolVersion,omitempty"` } func (x *LoginResponse) Reset() { @@ -278,6 +287,13 @@ func (x *LoginResponse) GetToken() string { return "" } +func (x *LoginResponse) GetServerProtocolVersion() int32 { + if x != nil { + return x.ServerProtocolVersion + } + return 0 +} + type GetZonesRequest struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache @@ -1189,168 +1205,174 @@ var file_pkg_server_remote_common_remote_proto_rawDesc = []byte{ 0x0a, 0x25, 0x70, 0x6b, 0x67, 0x2f, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x2f, 0x72, 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x2f, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2f, 0x72, 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x06, 0x72, 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x22, - 0x48, 0x0a, 0x0c, 0x4c, 0x6f, 0x67, 0x69, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, + 0x7e, 0x0a, 0x0c, 0x4c, 0x6f, 0x67, 0x69, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1c, 0x0a, 0x09, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, 0x1a, 0x0a, 0x08, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x64, 0x49, 0x44, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, - 0x08, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x64, 0x49, 0x44, 0x22, 0x25, 0x0a, 0x0d, 0x4c, 0x6f, 0x67, - 0x69, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x74, 0x6f, - 0x6b, 0x65, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x74, 0x6f, 0x6b, 0x65, 0x6e, - 0x22, 0x27, 0x0a, 0x0f, 0x47, 0x65, 0x74, 0x5a, 0x6f, 0x6e, 0x65, 0x73, 0x52, 0x65, 0x71, 0x75, - 0x65, 0x73, 0x74, 0x12, 0x14, 0x0a, 0x05, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x18, 0x01, 0x20, 0x01, - 0x28, 0x09, 0x52, 0x05, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x22, 0x29, 0x0a, 0x05, 0x5a, 0x6f, 0x6e, - 0x65, 0x73, 0x12, 0x20, 0x0a, 0x04, 0x7a, 0x6f, 0x6e, 0x65, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, - 0x32, 0x0c, 0x2e, 0x72, 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x2e, 0x5a, 0x6f, 0x6e, 0x65, 0x52, 0x04, - 0x7a, 0x6f, 0x6e, 0x65, 0x22, 0xb3, 0x01, 0x0a, 0x04, 0x5a, 0x6f, 0x6e, 0x65, 0x12, 0x0e, 0x0a, - 0x02, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x02, 0x69, 0x64, 0x12, 0x23, 0x0a, - 0x0d, 0x70, 0x72, 0x6f, 0x76, 0x69, 0x64, 0x65, 0x72, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x02, - 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, 0x70, 0x72, 0x6f, 0x76, 0x69, 0x64, 0x65, 0x72, 0x54, 0x79, - 0x70, 0x65, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 
0x52, - 0x03, 0x6b, 0x65, 0x79, 0x12, 0x16, 0x0a, 0x06, 0x64, 0x6f, 0x6d, 0x61, 0x69, 0x6e, 0x18, 0x04, - 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x64, 0x6f, 0x6d, 0x61, 0x69, 0x6e, 0x12, 0x29, 0x0a, 0x10, - 0x66, 0x6f, 0x72, 0x77, 0x61, 0x72, 0x64, 0x65, 0x64, 0x5f, 0x64, 0x6f, 0x6d, 0x61, 0x69, 0x6e, - 0x18, 0x05, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0f, 0x66, 0x6f, 0x72, 0x77, 0x61, 0x72, 0x64, 0x65, - 0x64, 0x44, 0x6f, 0x6d, 0x61, 0x69, 0x6e, 0x12, 0x21, 0x0a, 0x0c, 0x70, 0x72, 0x69, 0x76, 0x61, - 0x74, 0x65, 0x5f, 0x7a, 0x6f, 0x6e, 0x65, 0x18, 0x06, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0b, 0x70, - 0x72, 0x69, 0x76, 0x61, 0x74, 0x65, 0x5a, 0x6f, 0x6e, 0x65, 0x22, 0x43, 0x0a, 0x13, 0x47, 0x65, - 0x74, 0x5a, 0x6f, 0x6e, 0x65, 0x53, 0x74, 0x61, 0x74, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, - 0x74, 0x12, 0x14, 0x0a, 0x05, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, - 0x52, 0x05, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x12, 0x16, 0x0a, 0x06, 0x7a, 0x6f, 0x6e, 0x65, 0x69, - 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x7a, 0x6f, 0x6e, 0x65, 0x69, 0x64, 0x22, - 0x81, 0x03, 0x0a, 0x09, 0x52, 0x65, 0x63, 0x6f, 0x72, 0x64, 0x53, 0x65, 0x74, 0x12, 0x12, 0x0a, - 0x04, 0x74, 0x79, 0x70, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x74, 0x79, 0x70, - 0x65, 0x12, 0x10, 0x0a, 0x03, 0x74, 0x74, 0x6c, 0x18, 0x02, 0x20, 0x01, 0x28, 0x05, 0x52, 0x03, - 0x74, 0x74, 0x6c, 0x12, 0x30, 0x0a, 0x06, 0x72, 0x65, 0x63, 0x6f, 0x72, 0x64, 0x18, 0x03, 0x20, - 0x03, 0x28, 0x0b, 0x32, 0x18, 0x2e, 0x72, 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x2e, 0x52, 0x65, 0x63, - 0x6f, 0x72, 0x64, 0x53, 0x65, 0x74, 0x2e, 0x52, 0x65, 0x63, 0x6f, 0x72, 0x64, 0x52, 0x06, 0x72, - 0x65, 0x63, 0x6f, 0x72, 0x64, 0x12, 0x46, 0x0a, 0x0e, 0x72, 0x6f, 0x75, 0x74, 0x69, 0x6e, 0x67, - 0x5f, 0x70, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1f, 0x2e, - 0x72, 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x2e, 0x52, 0x65, 0x63, 0x6f, 0x72, 0x64, 0x53, 0x65, 0x74, - 0x2e, 0x52, 0x6f, 0x75, 0x74, 0x69, 0x6e, 0x67, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x52, 0x0d, - 0x72, 0x6f, 0x75, 0x74, 0x69, 0x6e, 0x67, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x1a, 0x1e, 0x0a, - 0x06, 0x52, 0x65, 0x63, 0x6f, 0x72, 0x64, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, - 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x1a, 0xb3, 0x01, - 0x0a, 0x0d, 0x52, 0x6f, 0x75, 0x74, 0x69, 0x6e, 0x67, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x12, - 0x12, 0x0a, 0x04, 0x74, 0x79, 0x70, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x74, - 0x79, 0x70, 0x65, 0x12, 0x4f, 0x0a, 0x0a, 0x70, 0x61, 0x72, 0x61, 0x6d, 0x65, 0x74, 0x65, 0x72, - 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x2f, 0x2e, 0x72, 0x65, 0x6d, 0x6f, 0x74, 0x65, - 0x2e, 0x52, 0x65, 0x63, 0x6f, 0x72, 0x64, 0x53, 0x65, 0x74, 0x2e, 0x52, 0x6f, 0x75, 0x74, 0x69, - 0x6e, 0x67, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x2e, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x65, 0x74, - 0x65, 0x72, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x0a, 0x70, 0x61, 0x72, 0x61, 0x6d, 0x65, - 0x74, 0x65, 0x72, 0x73, 0x1a, 0x3d, 0x0a, 0x0f, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x65, 0x74, 0x65, - 0x72, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, - 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, - 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, - 0x02, 0x38, 0x01, 0x22, 0xf3, 0x01, 0x0a, 0x06, 0x44, 0x4e, 0x53, 0x53, 0x65, 0x74, 0x12, 0x19, - 0x0a, 0x08, 0x64, 
0x6e, 0x73, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, - 0x52, 0x07, 0x64, 0x6e, 0x73, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x21, 0x0a, 0x0c, 0x75, 0x70, 0x64, - 0x61, 0x74, 0x65, 0x5f, 0x67, 0x72, 0x6f, 0x75, 0x70, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, - 0x0b, 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, 0x47, 0x72, 0x6f, 0x75, 0x70, 0x12, 0x35, 0x0a, 0x07, - 0x72, 0x65, 0x63, 0x6f, 0x72, 0x64, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1b, 0x2e, - 0x72, 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x2e, 0x44, 0x4e, 0x53, 0x53, 0x65, 0x74, 0x2e, 0x52, 0x65, - 0x63, 0x6f, 0x72, 0x64, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x07, 0x72, 0x65, 0x63, 0x6f, - 0x72, 0x64, 0x73, 0x12, 0x25, 0x0a, 0x0e, 0x73, 0x65, 0x74, 0x5f, 0x69, 0x64, 0x65, 0x6e, 0x74, - 0x69, 0x66, 0x69, 0x65, 0x72, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0d, 0x73, 0x65, 0x74, - 0x49, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x66, 0x69, 0x65, 0x72, 0x1a, 0x4d, 0x0a, 0x0c, 0x52, 0x65, - 0x63, 0x6f, 0x72, 0x64, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, - 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x27, 0x0a, 0x05, - 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x11, 0x2e, 0x72, 0x65, - 0x6d, 0x6f, 0x74, 0x65, 0x2e, 0x52, 0x65, 0x63, 0x6f, 0x72, 0x64, 0x53, 0x65, 0x74, 0x52, 0x05, - 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, 0xc7, 0x01, 0x0a, 0x0d, 0x50, 0x61, - 0x72, 0x74, 0x69, 0x61, 0x6c, 0x44, 0x4e, 0x53, 0x53, 0x65, 0x74, 0x12, 0x19, 0x0a, 0x08, 0x64, - 0x6e, 0x73, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x64, - 0x6e, 0x73, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x21, 0x0a, 0x0c, 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, - 0x5f, 0x67, 0x72, 0x6f, 0x75, 0x70, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x75, 0x70, - 0x64, 0x61, 0x74, 0x65, 0x47, 0x72, 0x6f, 0x75, 0x70, 0x12, 0x1f, 0x0a, 0x0b, 0x72, 0x65, 0x63, - 0x6f, 0x72, 0x64, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, - 0x72, 0x65, 0x63, 0x6f, 0x72, 0x64, 0x54, 0x79, 0x70, 0x65, 0x12, 0x30, 0x0a, 0x0a, 0x72, 0x65, - 0x63, 0x6f, 0x72, 0x64, 0x5f, 0x73, 0x65, 0x74, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x11, - 0x2e, 0x72, 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x2e, 0x52, 0x65, 0x63, 0x6f, 0x72, 0x64, 0x53, 0x65, - 0x74, 0x52, 0x09, 0x72, 0x65, 0x63, 0x6f, 0x72, 0x64, 0x53, 0x65, 0x74, 0x12, 0x25, 0x0a, 0x0e, - 0x73, 0x65, 0x74, 0x5f, 0x69, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x66, 0x69, 0x65, 0x72, 0x18, 0x05, - 0x20, 0x01, 0x28, 0x09, 0x52, 0x0d, 0x73, 0x65, 0x74, 0x49, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x66, - 0x69, 0x65, 0x72, 0x22, 0xa4, 0x01, 0x0a, 0x09, 0x5a, 0x6f, 0x6e, 0x65, 0x53, 0x74, 0x61, 0x74, - 0x65, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, - 0x6b, 0x65, 0x79, 0x12, 0x39, 0x0a, 0x08, 0x64, 0x6e, 0x73, 0x5f, 0x73, 0x65, 0x74, 0x73, 0x18, - 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1e, 0x2e, 0x72, 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x2e, 0x5a, - 0x6f, 0x6e, 0x65, 0x53, 0x74, 0x61, 0x74, 0x65, 0x2e, 0x44, 0x6e, 0x73, 0x53, 0x65, 0x74, 0x73, - 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x07, 0x64, 0x6e, 0x73, 0x53, 0x65, 0x74, 0x73, 0x1a, 0x4a, - 0x0a, 0x0c, 0x44, 0x6e, 0x73, 0x53, 0x65, 0x74, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, - 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, - 0x12, 0x24, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, - 0x0e, 0x2e, 0x72, 0x65, 0x6d, 0x6f, 0x74, 
0x65, 0x2e, 0x44, 0x4e, 0x53, 0x53, 0x65, 0x74, 0x52, - 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, 0x7c, 0x0a, 0x0e, 0x45, 0x78, - 0x65, 0x63, 0x75, 0x74, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x14, 0x0a, 0x05, - 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x74, 0x6f, 0x6b, - 0x65, 0x6e, 0x12, 0x16, 0x0a, 0x06, 0x7a, 0x6f, 0x6e, 0x65, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, - 0x28, 0x09, 0x52, 0x06, 0x7a, 0x6f, 0x6e, 0x65, 0x69, 0x64, 0x12, 0x3c, 0x0a, 0x0e, 0x63, 0x68, - 0x61, 0x6e, 0x67, 0x65, 0x5f, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x18, 0x03, 0x20, 0x03, - 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x72, 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x2e, 0x43, 0x68, 0x61, 0x6e, - 0x67, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x52, 0x0d, 0x63, 0x68, 0x61, 0x6e, 0x67, - 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x22, 0xaa, 0x01, 0x0a, 0x0d, 0x43, 0x68, 0x61, - 0x6e, 0x67, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x38, 0x0a, 0x06, 0x61, 0x63, - 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x20, 0x2e, 0x72, 0x65, 0x6d, - 0x6f, 0x74, 0x65, 0x2e, 0x43, 0x68, 0x61, 0x6e, 0x67, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, - 0x74, 0x2e, 0x41, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x54, 0x79, 0x70, 0x65, 0x52, 0x06, 0x61, 0x63, - 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x2d, 0x0a, 0x06, 0x63, 0x68, 0x61, 0x6e, 0x67, 0x65, 0x18, 0x03, - 0x20, 0x01, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x72, 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x2e, 0x50, 0x61, - 0x72, 0x74, 0x69, 0x61, 0x6c, 0x44, 0x4e, 0x53, 0x53, 0x65, 0x74, 0x52, 0x06, 0x63, 0x68, 0x61, - 0x6e, 0x67, 0x65, 0x22, 0x30, 0x0a, 0x0a, 0x41, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x54, 0x79, 0x70, - 0x65, 0x12, 0x0a, 0x0a, 0x06, 0x43, 0x52, 0x45, 0x41, 0x54, 0x45, 0x10, 0x00, 0x12, 0x0a, 0x0a, - 0x06, 0x55, 0x50, 0x44, 0x41, 0x54, 0x45, 0x10, 0x01, 0x12, 0x0a, 0x0a, 0x06, 0x44, 0x45, 0x4c, - 0x45, 0x54, 0x45, 0x10, 0x02, 0x22, 0xa3, 0x01, 0x0a, 0x08, 0x4c, 0x6f, 0x67, 0x45, 0x6e, 0x74, - 0x72, 0x79, 0x12, 0x1c, 0x0a, 0x09, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x18, - 0x01, 0x20, 0x01, 0x28, 0x03, 0x52, 0x09, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, - 0x12, 0x2c, 0x0a, 0x05, 0x6c, 0x65, 0x76, 0x65, 0x6c, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0e, 0x32, - 0x16, 0x2e, 0x72, 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x2e, 0x4c, 0x6f, 0x67, 0x45, 0x6e, 0x74, 0x72, - 0x79, 0x2e, 0x4c, 0x65, 0x76, 0x65, 0x6c, 0x52, 0x05, 0x6c, 0x65, 0x76, 0x65, 0x6c, 0x12, 0x18, - 0x0a, 0x07, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, - 0x07, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x22, 0x31, 0x0a, 0x05, 0x4c, 0x65, 0x76, 0x65, - 0x6c, 0x12, 0x09, 0x0a, 0x05, 0x44, 0x45, 0x42, 0x55, 0x47, 0x10, 0x00, 0x12, 0x08, 0x0a, 0x04, - 0x49, 0x4e, 0x46, 0x4f, 0x10, 0x01, 0x12, 0x08, 0x0a, 0x04, 0x57, 0x41, 0x52, 0x4e, 0x10, 0x02, - 0x12, 0x09, 0x0a, 0x05, 0x45, 0x52, 0x52, 0x4f, 0x52, 0x10, 0x03, 0x22, 0x85, 0x01, 0x0a, 0x0f, - 0x45, 0x78, 0x65, 0x63, 0x75, 0x74, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, - 0x3f, 0x0a, 0x0f, 0x63, 0x68, 0x61, 0x6e, 0x67, 0x65, 0x5f, 0x72, 0x65, 0x73, 0x70, 0x6f, 0x6e, - 0x73, 0x65, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x16, 0x2e, 0x72, 0x65, 0x6d, 0x6f, 0x74, - 0x65, 0x2e, 0x43, 0x68, 0x61, 0x6e, 0x67, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, - 0x52, 0x0e, 0x63, 0x68, 0x61, 0x6e, 0x67, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, - 0x12, 0x31, 0x0a, 0x0b, 0x6c, 0x6f, 0x67, 0x5f, 0x6d, 0x65, 0x73, 
0x73, 0x61, 0x67, 0x65, 0x18, - 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x10, 0x2e, 0x72, 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x2e, 0x4c, - 0x6f, 0x67, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x0a, 0x6c, 0x6f, 0x67, 0x4d, 0x65, 0x73, 0x73, - 0x61, 0x67, 0x65, 0x22, 0xbc, 0x01, 0x0a, 0x0e, 0x43, 0x68, 0x61, 0x6e, 0x67, 0x65, 0x52, 0x65, - 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x32, 0x0a, 0x05, 0x73, 0x74, 0x61, 0x74, 0x65, 0x18, - 0x01, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x1c, 0x2e, 0x72, 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x2e, 0x43, - 0x68, 0x61, 0x6e, 0x67, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x2e, 0x53, 0x74, - 0x61, 0x74, 0x65, 0x52, 0x05, 0x73, 0x74, 0x61, 0x74, 0x65, 0x12, 0x23, 0x0a, 0x0d, 0x65, 0x72, - 0x72, 0x6f, 0x72, 0x5f, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, - 0x09, 0x52, 0x0c, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x22, - 0x51, 0x0a, 0x05, 0x53, 0x74, 0x61, 0x74, 0x65, 0x12, 0x11, 0x0a, 0x0d, 0x4e, 0x4f, 0x54, 0x5f, - 0x50, 0x52, 0x4f, 0x43, 0x45, 0x53, 0x53, 0x45, 0x44, 0x10, 0x00, 0x12, 0x0d, 0x0a, 0x09, 0x53, - 0x55, 0x43, 0x43, 0x45, 0x45, 0x44, 0x45, 0x44, 0x10, 0x01, 0x12, 0x0b, 0x0a, 0x07, 0x49, 0x4e, - 0x56, 0x41, 0x4c, 0x49, 0x44, 0x10, 0x02, 0x12, 0x0a, 0x0a, 0x06, 0x46, 0x41, 0x49, 0x4c, 0x45, - 0x44, 0x10, 0x03, 0x12, 0x0d, 0x0a, 0x09, 0x54, 0x48, 0x52, 0x4f, 0x54, 0x54, 0x4c, 0x45, 0x44, - 0x10, 0x04, 0x32, 0xfe, 0x01, 0x0a, 0x0e, 0x52, 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x50, 0x72, 0x6f, - 0x76, 0x69, 0x64, 0x65, 0x72, 0x12, 0x36, 0x0a, 0x05, 0x4c, 0x6f, 0x67, 0x69, 0x6e, 0x12, 0x14, - 0x2e, 0x72, 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x2e, 0x4c, 0x6f, 0x67, 0x69, 0x6e, 0x52, 0x65, 0x71, - 0x75, 0x65, 0x73, 0x74, 0x1a, 0x15, 0x2e, 0x72, 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x2e, 0x4c, 0x6f, - 0x67, 0x69, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x34, 0x0a, - 0x08, 0x47, 0x65, 0x74, 0x5a, 0x6f, 0x6e, 0x65, 0x73, 0x12, 0x17, 0x2e, 0x72, 0x65, 0x6d, 0x6f, - 0x74, 0x65, 0x2e, 0x47, 0x65, 0x74, 0x5a, 0x6f, 0x6e, 0x65, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, - 0x73, 0x74, 0x1a, 0x0d, 0x2e, 0x72, 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x2e, 0x5a, 0x6f, 0x6e, 0x65, - 0x73, 0x22, 0x00, 0x12, 0x40, 0x0a, 0x0c, 0x47, 0x65, 0x74, 0x5a, 0x6f, 0x6e, 0x65, 0x53, 0x74, - 0x61, 0x74, 0x65, 0x12, 0x1b, 0x2e, 0x72, 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x2e, 0x47, 0x65, 0x74, - 0x5a, 0x6f, 0x6e, 0x65, 0x53, 0x74, 0x61, 0x74, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, - 0x1a, 0x11, 0x2e, 0x72, 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x2e, 0x5a, 0x6f, 0x6e, 0x65, 0x53, 0x74, - 0x61, 0x74, 0x65, 0x22, 0x00, 0x12, 0x3c, 0x0a, 0x07, 0x45, 0x78, 0x65, 0x63, 0x75, 0x74, 0x65, - 0x12, 0x16, 0x2e, 0x72, 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x2e, 0x45, 0x78, 0x65, 0x63, 0x75, 0x74, - 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x17, 0x2e, 0x72, 0x65, 0x6d, 0x6f, 0x74, - 0x65, 0x2e, 0x45, 0x78, 0x65, 0x63, 0x75, 0x74, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, - 0x65, 0x22, 0x00, 0x42, 0x46, 0x5a, 0x44, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, - 0x6d, 0x2f, 0x67, 0x61, 0x72, 0x64, 0x65, 0x6e, 0x65, 0x72, 0x2f, 0x65, 0x78, 0x74, 0x65, 0x72, - 0x6e, 0x61, 0x6c, 0x2d, 0x64, 0x6e, 0x73, 0x2d, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65, - 0x6e, 0x74, 0x2f, 0x70, 0x6b, 0x67, 0x2f, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x2f, 0x72, 0x65, - 0x6d, 0x6f, 0x74, 0x65, 0x2f, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x62, 0x06, 0x70, 0x72, 0x6f, - 0x74, 0x6f, 0x33, + 0x08, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x64, 0x49, 0x44, 0x12, 0x34, 0x0a, 
0x15, 0x63, 0x6c, 0x69, + 0x65, 0x6e, 0x74, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x56, 0x65, 0x72, 0x73, 0x69, + 0x6f, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x05, 0x52, 0x15, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, + 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x22, + 0x5b, 0x0a, 0x0d, 0x4c, 0x6f, 0x67, 0x69, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, + 0x12, 0x14, 0x0a, 0x05, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x05, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x12, 0x34, 0x0a, 0x15, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, + 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x18, + 0x02, 0x20, 0x01, 0x28, 0x05, 0x52, 0x15, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x50, 0x72, 0x6f, + 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x22, 0x27, 0x0a, 0x0f, + 0x47, 0x65, 0x74, 0x5a, 0x6f, 0x6e, 0x65, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, + 0x14, 0x0a, 0x05, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, + 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x22, 0x29, 0x0a, 0x05, 0x5a, 0x6f, 0x6e, 0x65, 0x73, 0x12, 0x20, + 0x0a, 0x04, 0x7a, 0x6f, 0x6e, 0x65, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x0c, 0x2e, 0x72, + 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x2e, 0x5a, 0x6f, 0x6e, 0x65, 0x52, 0x04, 0x7a, 0x6f, 0x6e, 0x65, + 0x22, 0xb3, 0x01, 0x0a, 0x04, 0x5a, 0x6f, 0x6e, 0x65, 0x12, 0x0e, 0x0a, 0x02, 0x69, 0x64, 0x18, + 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x02, 0x69, 0x64, 0x12, 0x23, 0x0a, 0x0d, 0x70, 0x72, 0x6f, + 0x76, 0x69, 0x64, 0x65, 0x72, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x0c, 0x70, 0x72, 0x6f, 0x76, 0x69, 0x64, 0x65, 0x72, 0x54, 0x79, 0x70, 0x65, 0x12, 0x10, + 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, + 0x12, 0x16, 0x0a, 0x06, 0x64, 0x6f, 0x6d, 0x61, 0x69, 0x6e, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x06, 0x64, 0x6f, 0x6d, 0x61, 0x69, 0x6e, 0x12, 0x29, 0x0a, 0x10, 0x66, 0x6f, 0x72, 0x77, + 0x61, 0x72, 0x64, 0x65, 0x64, 0x5f, 0x64, 0x6f, 0x6d, 0x61, 0x69, 0x6e, 0x18, 0x05, 0x20, 0x03, + 0x28, 0x09, 0x52, 0x0f, 0x66, 0x6f, 0x72, 0x77, 0x61, 0x72, 0x64, 0x65, 0x64, 0x44, 0x6f, 0x6d, + 0x61, 0x69, 0x6e, 0x12, 0x21, 0x0a, 0x0c, 0x70, 0x72, 0x69, 0x76, 0x61, 0x74, 0x65, 0x5f, 0x7a, + 0x6f, 0x6e, 0x65, 0x18, 0x06, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0b, 0x70, 0x72, 0x69, 0x76, 0x61, + 0x74, 0x65, 0x5a, 0x6f, 0x6e, 0x65, 0x22, 0x43, 0x0a, 0x13, 0x47, 0x65, 0x74, 0x5a, 0x6f, 0x6e, + 0x65, 0x53, 0x74, 0x61, 0x74, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x14, 0x0a, + 0x05, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x74, 0x6f, + 0x6b, 0x65, 0x6e, 0x12, 0x16, 0x0a, 0x06, 0x7a, 0x6f, 0x6e, 0x65, 0x69, 0x64, 0x18, 0x02, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x06, 0x7a, 0x6f, 0x6e, 0x65, 0x69, 0x64, 0x22, 0x81, 0x03, 0x0a, 0x09, + 0x52, 0x65, 0x63, 0x6f, 0x72, 0x64, 0x53, 0x65, 0x74, 0x12, 0x12, 0x0a, 0x04, 0x74, 0x79, 0x70, + 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x74, 0x79, 0x70, 0x65, 0x12, 0x10, 0x0a, + 0x03, 0x74, 0x74, 0x6c, 0x18, 0x02, 0x20, 0x01, 0x28, 0x05, 0x52, 0x03, 0x74, 0x74, 0x6c, 0x12, + 0x30, 0x0a, 0x06, 0x72, 0x65, 0x63, 0x6f, 0x72, 0x64, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, + 0x18, 0x2e, 0x72, 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x2e, 0x52, 0x65, 0x63, 0x6f, 0x72, 0x64, 0x53, + 0x65, 0x74, 0x2e, 0x52, 0x65, 0x63, 0x6f, 0x72, 0x64, 0x52, 0x06, 0x72, 0x65, 0x63, 0x6f, 0x72, + 
0x64, 0x12, 0x46, 0x0a, 0x0e, 0x72, 0x6f, 0x75, 0x74, 0x69, 0x6e, 0x67, 0x5f, 0x70, 0x6f, 0x6c, + 0x69, 0x63, 0x79, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1f, 0x2e, 0x72, 0x65, 0x6d, 0x6f, + 0x74, 0x65, 0x2e, 0x52, 0x65, 0x63, 0x6f, 0x72, 0x64, 0x53, 0x65, 0x74, 0x2e, 0x52, 0x6f, 0x75, + 0x74, 0x69, 0x6e, 0x67, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x52, 0x0d, 0x72, 0x6f, 0x75, 0x74, + 0x69, 0x6e, 0x67, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x1a, 0x1e, 0x0a, 0x06, 0x52, 0x65, 0x63, + 0x6f, 0x72, 0x64, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x01, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x1a, 0xb3, 0x01, 0x0a, 0x0d, 0x52, 0x6f, + 0x75, 0x74, 0x69, 0x6e, 0x67, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x12, 0x12, 0x0a, 0x04, 0x74, + 0x79, 0x70, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x74, 0x79, 0x70, 0x65, 0x12, + 0x4f, 0x0a, 0x0a, 0x70, 0x61, 0x72, 0x61, 0x6d, 0x65, 0x74, 0x65, 0x72, 0x73, 0x18, 0x02, 0x20, + 0x03, 0x28, 0x0b, 0x32, 0x2f, 0x2e, 0x72, 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x2e, 0x52, 0x65, 0x63, + 0x6f, 0x72, 0x64, 0x53, 0x65, 0x74, 0x2e, 0x52, 0x6f, 0x75, 0x74, 0x69, 0x6e, 0x67, 0x50, 0x6f, + 0x6c, 0x69, 0x63, 0x79, 0x2e, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x65, 0x74, 0x65, 0x72, 0x73, 0x45, + 0x6e, 0x74, 0x72, 0x79, 0x52, 0x0a, 0x70, 0x61, 0x72, 0x61, 0x6d, 0x65, 0x74, 0x65, 0x72, 0x73, + 0x1a, 0x3d, 0x0a, 0x0f, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x65, 0x74, 0x65, 0x72, 0x73, 0x45, 0x6e, + 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, + 0xf3, 0x01, 0x0a, 0x06, 0x44, 0x4e, 0x53, 0x53, 0x65, 0x74, 0x12, 0x19, 0x0a, 0x08, 0x64, 0x6e, + 0x73, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x64, 0x6e, + 0x73, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x21, 0x0a, 0x0c, 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, 0x5f, + 0x67, 0x72, 0x6f, 0x75, 0x70, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x75, 0x70, 0x64, + 0x61, 0x74, 0x65, 0x47, 0x72, 0x6f, 0x75, 0x70, 0x12, 0x35, 0x0a, 0x07, 0x72, 0x65, 0x63, 0x6f, + 0x72, 0x64, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x72, 0x65, 0x6d, 0x6f, + 0x74, 0x65, 0x2e, 0x44, 0x4e, 0x53, 0x53, 0x65, 0x74, 0x2e, 0x52, 0x65, 0x63, 0x6f, 0x72, 0x64, + 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x07, 0x72, 0x65, 0x63, 0x6f, 0x72, 0x64, 0x73, 0x12, + 0x25, 0x0a, 0x0e, 0x73, 0x65, 0x74, 0x5f, 0x69, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x66, 0x69, 0x65, + 0x72, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0d, 0x73, 0x65, 0x74, 0x49, 0x64, 0x65, 0x6e, + 0x74, 0x69, 0x66, 0x69, 0x65, 0x72, 0x1a, 0x4d, 0x0a, 0x0c, 0x52, 0x65, 0x63, 0x6f, 0x72, 0x64, + 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x27, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, + 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x11, 0x2e, 0x72, 0x65, 0x6d, 0x6f, 0x74, 0x65, + 0x2e, 0x52, 0x65, 0x63, 0x6f, 0x72, 0x64, 0x53, 0x65, 0x74, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, + 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, 0xc7, 0x01, 0x0a, 0x0d, 0x50, 0x61, 0x72, 0x74, 0x69, 0x61, + 0x6c, 0x44, 0x4e, 0x53, 0x53, 0x65, 0x74, 0x12, 0x19, 0x0a, 0x08, 0x64, 0x6e, 0x73, 0x5f, 0x6e, + 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x64, 0x6e, 0x73, 0x4e, 0x61, + 0x6d, 0x65, 0x12, 0x21, 
0x0a, 0x0c, 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, 0x5f, 0x67, 0x72, 0x6f, + 0x75, 0x70, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, + 0x47, 0x72, 0x6f, 0x75, 0x70, 0x12, 0x1f, 0x0a, 0x0b, 0x72, 0x65, 0x63, 0x6f, 0x72, 0x64, 0x5f, + 0x74, 0x79, 0x70, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x72, 0x65, 0x63, 0x6f, + 0x72, 0x64, 0x54, 0x79, 0x70, 0x65, 0x12, 0x30, 0x0a, 0x0a, 0x72, 0x65, 0x63, 0x6f, 0x72, 0x64, + 0x5f, 0x73, 0x65, 0x74, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x11, 0x2e, 0x72, 0x65, 0x6d, + 0x6f, 0x74, 0x65, 0x2e, 0x52, 0x65, 0x63, 0x6f, 0x72, 0x64, 0x53, 0x65, 0x74, 0x52, 0x09, 0x72, + 0x65, 0x63, 0x6f, 0x72, 0x64, 0x53, 0x65, 0x74, 0x12, 0x25, 0x0a, 0x0e, 0x73, 0x65, 0x74, 0x5f, + 0x69, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x66, 0x69, 0x65, 0x72, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x0d, 0x73, 0x65, 0x74, 0x49, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x66, 0x69, 0x65, 0x72, 0x22, + 0xa4, 0x01, 0x0a, 0x09, 0x5a, 0x6f, 0x6e, 0x65, 0x53, 0x74, 0x61, 0x74, 0x65, 0x12, 0x10, 0x0a, + 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, + 0x39, 0x0a, 0x08, 0x64, 0x6e, 0x73, 0x5f, 0x73, 0x65, 0x74, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, + 0x0b, 0x32, 0x1e, 0x2e, 0x72, 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x2e, 0x5a, 0x6f, 0x6e, 0x65, 0x53, + 0x74, 0x61, 0x74, 0x65, 0x2e, 0x44, 0x6e, 0x73, 0x53, 0x65, 0x74, 0x73, 0x45, 0x6e, 0x74, 0x72, + 0x79, 0x52, 0x07, 0x64, 0x6e, 0x73, 0x53, 0x65, 0x74, 0x73, 0x1a, 0x4a, 0x0a, 0x0c, 0x44, 0x6e, + 0x73, 0x53, 0x65, 0x74, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, + 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x24, 0x0a, 0x05, + 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0e, 0x2e, 0x72, 0x65, + 0x6d, 0x6f, 0x74, 0x65, 0x2e, 0x44, 0x4e, 0x53, 0x53, 0x65, 0x74, 0x52, 0x05, 0x76, 0x61, 0x6c, + 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, 0x7c, 0x0a, 0x0e, 0x45, 0x78, 0x65, 0x63, 0x75, 0x74, + 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x14, 0x0a, 0x05, 0x74, 0x6f, 0x6b, 0x65, + 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x12, 0x16, + 0x0a, 0x06, 0x7a, 0x6f, 0x6e, 0x65, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, + 0x7a, 0x6f, 0x6e, 0x65, 0x69, 0x64, 0x12, 0x3c, 0x0a, 0x0e, 0x63, 0x68, 0x61, 0x6e, 0x67, 0x65, + 0x5f, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x15, + 0x2e, 0x72, 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x2e, 0x43, 0x68, 0x61, 0x6e, 0x67, 0x65, 0x52, 0x65, + 0x71, 0x75, 0x65, 0x73, 0x74, 0x52, 0x0d, 0x63, 0x68, 0x61, 0x6e, 0x67, 0x65, 0x52, 0x65, 0x71, + 0x75, 0x65, 0x73, 0x74, 0x22, 0xaa, 0x01, 0x0a, 0x0d, 0x43, 0x68, 0x61, 0x6e, 0x67, 0x65, 0x52, + 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x38, 0x0a, 0x06, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, + 0x18, 0x01, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x20, 0x2e, 0x72, 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x2e, + 0x43, 0x68, 0x61, 0x6e, 0x67, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x2e, 0x41, 0x63, + 0x74, 0x69, 0x6f, 0x6e, 0x54, 0x79, 0x70, 0x65, 0x52, 0x06, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, + 0x12, 0x2d, 0x0a, 0x06, 0x63, 0x68, 0x61, 0x6e, 0x67, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, + 0x32, 0x15, 0x2e, 0x72, 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x2e, 0x50, 0x61, 0x72, 0x74, 0x69, 0x61, + 0x6c, 0x44, 0x4e, 0x53, 0x53, 0x65, 0x74, 0x52, 0x06, 0x63, 0x68, 0x61, 0x6e, 0x67, 0x65, 0x22, + 0x30, 0x0a, 0x0a, 0x41, 0x63, 0x74, 0x69, 0x6f, 
0x6e, 0x54, 0x79, 0x70, 0x65, 0x12, 0x0a, 0x0a, + 0x06, 0x43, 0x52, 0x45, 0x41, 0x54, 0x45, 0x10, 0x00, 0x12, 0x0a, 0x0a, 0x06, 0x55, 0x50, 0x44, + 0x41, 0x54, 0x45, 0x10, 0x01, 0x12, 0x0a, 0x0a, 0x06, 0x44, 0x45, 0x4c, 0x45, 0x54, 0x45, 0x10, + 0x02, 0x22, 0xa3, 0x01, 0x0a, 0x08, 0x4c, 0x6f, 0x67, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x1c, + 0x0a, 0x09, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x18, 0x01, 0x20, 0x01, 0x28, + 0x03, 0x52, 0x09, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x12, 0x2c, 0x0a, 0x05, + 0x6c, 0x65, 0x76, 0x65, 0x6c, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x16, 0x2e, 0x72, 0x65, + 0x6d, 0x6f, 0x74, 0x65, 0x2e, 0x4c, 0x6f, 0x67, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x2e, 0x4c, 0x65, + 0x76, 0x65, 0x6c, 0x52, 0x05, 0x6c, 0x65, 0x76, 0x65, 0x6c, 0x12, 0x18, 0x0a, 0x07, 0x6d, 0x65, + 0x73, 0x73, 0x61, 0x67, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x6d, 0x65, 0x73, + 0x73, 0x61, 0x67, 0x65, 0x22, 0x31, 0x0a, 0x05, 0x4c, 0x65, 0x76, 0x65, 0x6c, 0x12, 0x09, 0x0a, + 0x05, 0x44, 0x45, 0x42, 0x55, 0x47, 0x10, 0x00, 0x12, 0x08, 0x0a, 0x04, 0x49, 0x4e, 0x46, 0x4f, + 0x10, 0x01, 0x12, 0x08, 0x0a, 0x04, 0x57, 0x41, 0x52, 0x4e, 0x10, 0x02, 0x12, 0x09, 0x0a, 0x05, + 0x45, 0x52, 0x52, 0x4f, 0x52, 0x10, 0x03, 0x22, 0x85, 0x01, 0x0a, 0x0f, 0x45, 0x78, 0x65, 0x63, + 0x75, 0x74, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x3f, 0x0a, 0x0f, 0x63, + 0x68, 0x61, 0x6e, 0x67, 0x65, 0x5f, 0x72, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x18, 0x01, + 0x20, 0x03, 0x28, 0x0b, 0x32, 0x16, 0x2e, 0x72, 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x2e, 0x43, 0x68, + 0x61, 0x6e, 0x67, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x52, 0x0e, 0x63, 0x68, + 0x61, 0x6e, 0x67, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x31, 0x0a, 0x0b, + 0x6c, 0x6f, 0x67, 0x5f, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x18, 0x02, 0x20, 0x03, 0x28, + 0x0b, 0x32, 0x10, 0x2e, 0x72, 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x2e, 0x4c, 0x6f, 0x67, 0x45, 0x6e, + 0x74, 0x72, 0x79, 0x52, 0x0a, 0x6c, 0x6f, 0x67, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x22, + 0xbc, 0x01, 0x0a, 0x0e, 0x43, 0x68, 0x61, 0x6e, 0x67, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, + 0x73, 0x65, 0x12, 0x32, 0x0a, 0x05, 0x73, 0x74, 0x61, 0x74, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, + 0x0e, 0x32, 0x1c, 0x2e, 0x72, 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x2e, 0x43, 0x68, 0x61, 0x6e, 0x67, + 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x2e, 0x53, 0x74, 0x61, 0x74, 0x65, 0x52, + 0x05, 0x73, 0x74, 0x61, 0x74, 0x65, 0x12, 0x23, 0x0a, 0x0d, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x5f, + 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, 0x65, + 0x72, 0x72, 0x6f, 0x72, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x22, 0x51, 0x0a, 0x05, 0x53, + 0x74, 0x61, 0x74, 0x65, 0x12, 0x11, 0x0a, 0x0d, 0x4e, 0x4f, 0x54, 0x5f, 0x50, 0x52, 0x4f, 0x43, + 0x45, 0x53, 0x53, 0x45, 0x44, 0x10, 0x00, 0x12, 0x0d, 0x0a, 0x09, 0x53, 0x55, 0x43, 0x43, 0x45, + 0x45, 0x44, 0x45, 0x44, 0x10, 0x01, 0x12, 0x0b, 0x0a, 0x07, 0x49, 0x4e, 0x56, 0x41, 0x4c, 0x49, + 0x44, 0x10, 0x02, 0x12, 0x0a, 0x0a, 0x06, 0x46, 0x41, 0x49, 0x4c, 0x45, 0x44, 0x10, 0x03, 0x12, + 0x0d, 0x0a, 0x09, 0x54, 0x48, 0x52, 0x4f, 0x54, 0x54, 0x4c, 0x45, 0x44, 0x10, 0x04, 0x32, 0xfe, + 0x01, 0x0a, 0x0e, 0x52, 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x50, 0x72, 0x6f, 0x76, 0x69, 0x64, 0x65, + 0x72, 0x12, 0x36, 0x0a, 0x05, 0x4c, 0x6f, 0x67, 0x69, 0x6e, 0x12, 0x14, 0x2e, 0x72, 0x65, 0x6d, + 0x6f, 0x74, 0x65, 0x2e, 0x4c, 0x6f, 0x67, 0x69, 0x6e, 0x52, 0x65, 0x71, 
0x75, 0x65, 0x73, 0x74, + 0x1a, 0x15, 0x2e, 0x72, 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x2e, 0x4c, 0x6f, 0x67, 0x69, 0x6e, 0x52, + 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x34, 0x0a, 0x08, 0x47, 0x65, 0x74, + 0x5a, 0x6f, 0x6e, 0x65, 0x73, 0x12, 0x17, 0x2e, 0x72, 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x2e, 0x47, + 0x65, 0x74, 0x5a, 0x6f, 0x6e, 0x65, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x0d, + 0x2e, 0x72, 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x2e, 0x5a, 0x6f, 0x6e, 0x65, 0x73, 0x22, 0x00, 0x12, + 0x40, 0x0a, 0x0c, 0x47, 0x65, 0x74, 0x5a, 0x6f, 0x6e, 0x65, 0x53, 0x74, 0x61, 0x74, 0x65, 0x12, + 0x1b, 0x2e, 0x72, 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x2e, 0x47, 0x65, 0x74, 0x5a, 0x6f, 0x6e, 0x65, + 0x53, 0x74, 0x61, 0x74, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x11, 0x2e, 0x72, + 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x2e, 0x5a, 0x6f, 0x6e, 0x65, 0x53, 0x74, 0x61, 0x74, 0x65, 0x22, + 0x00, 0x12, 0x3c, 0x0a, 0x07, 0x45, 0x78, 0x65, 0x63, 0x75, 0x74, 0x65, 0x12, 0x16, 0x2e, 0x72, + 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x2e, 0x45, 0x78, 0x65, 0x63, 0x75, 0x74, 0x65, 0x52, 0x65, 0x71, + 0x75, 0x65, 0x73, 0x74, 0x1a, 0x17, 0x2e, 0x72, 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x2e, 0x45, 0x78, + 0x65, 0x63, 0x75, 0x74, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x42, + 0x46, 0x5a, 0x44, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x67, 0x61, + 0x72, 0x64, 0x65, 0x6e, 0x65, 0x72, 0x2f, 0x65, 0x78, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x2d, + 0x64, 0x6e, 0x73, 0x2d, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x2f, 0x70, + 0x6b, 0x67, 0x2f, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x2f, 0x72, 0x65, 0x6d, 0x6f, 0x74, 0x65, + 0x2f, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, } var ( diff --git a/pkg/server/remote/common/remote.proto b/pkg/server/remote/common/remote.proto index eb14822f6..10cfa3492 100644 --- a/pkg/server/remote/common/remote.proto +++ b/pkg/server/remote/common/remote.proto @@ -18,10 +18,12 @@ service RemoteProvider { message LoginRequest { string namespace = 1; string cliendID = 2; + int32 clientProtocolVersion = 3; } message LoginResponse { - string token = 1; + string token = 1; + int32 serverProtocolVersion = 2; } message GetZonesRequest { diff --git a/pkg/server/remote/common/types.go b/pkg/server/remote/common/types.go index a36e2a705..b759f7b95 100644 --- a/pkg/server/remote/common/types.go +++ b/pkg/server/remote/common/types.go @@ -17,5 +17,11 @@ package common const InvalidToken = "[invalid token]" +const ( + // ProtocolVersion0 without support for routing policy + ProtocolVersion0 = 0 + // ProtocolVersion1 with support for routing policy + ProtocolVersion1 = 1 +) type DNSSets map[string]*DNSSet diff --git a/pkg/server/remote/conversion/conversion.go b/pkg/server/remote/conversion/conversion.go index e4b5b09b3..9849be3e3 100644 --- a/pkg/server/remote/conversion/conversion.go +++ b/pkg/server/remote/conversion/conversion.go @@ -25,10 +25,13 @@ import ( "github.com/gardener/external-dns-management/pkg/server/remote/common" ) -func MarshalDNSSets(local dns.DNSSets) common.DNSSets { +func MarshalDNSSets(local dns.DNSSets, protocolVersion int32) common.DNSSets { result := common.DNSSets{} for name, dnsset := range local { - result[marshalRecordSetName(name)] = MarshalDNSSet(dnsset) + if name.SetIdentifier == "" || protocolVersion == common.ProtocolVersion1 { + // don't return recordsets with routing policy for protocol version 0 + result[marshalRecordSetName(name)] = MarshalDNSSet(dnsset) 
+ } } return result } diff --git a/pkg/server/remote/conversion/conversion_test.go b/pkg/server/remote/conversion/conversion_test.go index dbae57576..b7e7dcedf 100644 --- a/pkg/server/remote/conversion/conversion_test.go +++ b/pkg/server/remote/conversion/conversion_test.go @@ -22,6 +22,7 @@ import ( "github.com/gardener/external-dns-management/pkg/dns" "github.com/gardener/external-dns-management/pkg/dns/provider" + "github.com/gardener/external-dns-management/pkg/server/remote/common" ) func TestMarshalDNSSets(t *testing.T) { @@ -41,15 +42,24 @@ func TestMarshalDNSSets(t *testing.T) { sets1.AddRecordSet(dns.RecordSetName{DNSName: "c.a", SetIdentifier: "id1"}, rsc1) sets1.AddRecordSet(dns.RecordSetName{DNSName: "c.a", SetIdentifier: "id2"}, rsc2) table := []struct { - name string - sets dns.DNSSets + name string + sets dns.DNSSets + expectedSizeVersion1 int + expectedSizeVersion0 int }{ - {"empty", dns.DNSSets{}}, - {"sets1", sets1}, + {"empty", dns.DNSSets{}, 0, 0}, + {"sets1", sets1, 3, 1}, } for _, item := range table { - remote := MarshalDNSSets(item.sets) + remote0 := MarshalDNSSets(item.sets, common.ProtocolVersion0) + if len(remote0) != item.expectedSizeVersion0 { + t.Errorf("version 0 size mismatch: %d != %d", len(remote0), item.expectedSizeVersion0) + } + remote := MarshalDNSSets(item.sets, common.ProtocolVersion1) + if len(remote) != item.expectedSizeVersion1 { + t.Errorf("version 0 size mismatch: %d != %d", len(remote), item.expectedSizeVersion1) + } copy := UnmarshalDNSSets(remote) if !reflect.DeepEqual(item.sets, copy) { diff --git a/pkg/server/remote/server.go b/pkg/server/remote/server.go index b791ad4ca..1462236e3 100644 --- a/pkg/server/remote/server.go +++ b/pkg/server/remote/server.go @@ -123,19 +123,19 @@ func (s *server) ProviderRemovedEvent(logger logger.LogContext, objectName resou type reportFunc func(err error) -func (s *server) checkAuth(token, requestType, zoneid string) (*namespaceState, logger.LogContext, reportFunc, error) { +func (s *server) checkAuth(token, requestType, zoneid string) (*namespaceState, logger.LogContext, reportFunc, int32, error) { start := time.Now() parts := strings.SplitN(token, "|", 2) namespace := parts[0] nsState := s.getNamespaceState(namespace, false) if nsState == nil { - return nil, s.logctx, nil, fmt.Errorf("namespace %s not found or no providers available", namespace) + return nil, s.logctx, nil, 0, fmt.Errorf("namespace %s not found or no providers available", namespace) } - clientID, err := nsState.getToken(token) + clientID, version, err := nsState.getToken(token) logctx := s.logctx.NewContext("namespace", nsState.name).NewContext("clientID", clientID) if err != nil { - return nil, logctx, nil, err + return nil, logctx, nil, 0, err } rf := func(err error) { @@ -148,7 +148,7 @@ func (s *server) checkAuth(token, requestType, zoneid string) (*namespaceState, } metrics.ReportRemoteAccessRequests(namespace, clientID, requestType, zoneid) - return nsState, logctx, rf, nil + return nsState, logctx, rf, version, nil } func (s *server) Login(ctx context.Context, request *common.LoginRequest) (*common.LoginResponse, error) { @@ -174,8 +174,8 @@ func (s *server) Login(ctx context.Context, request *common.LoginRequest) (*comm return nil, fmt.Errorf("random failed: %w", err) } - token := nsState.generateAndAddToken(s.tokenTTL, rnd, request.CliendID, s.serverID) - return &common.LoginResponse{Token: token}, nil + token := nsState.generateAndAddToken(s.tokenTTL, rnd, request.CliendID, s.serverID, request.ClientProtocolVersion) + return 
&common.LoginResponse{Token: token, ServerProtocolVersion: common.ProtocolVersion1}, nil } func (s *server) checkNamespaceAuthorization(ctx context.Context, namespace string) (string, error) { @@ -217,7 +217,7 @@ func (s *server) cleanupTokens() { } func (s *server) GetZones(_ context.Context, request *common.GetZonesRequest) (*common.Zones, error) { - nsState, logctx, report, err := s.checkAuth(request.Token, "GetZones", "") + nsState, logctx, report, _, err := s.checkAuth(request.Token, "GetZones", "") if err != nil { logctx.Warn(err) return nil, err @@ -253,7 +253,7 @@ func (s *server) getZones(nsState *namespaceState, logctx logger.LogContext) (*c } func (s *server) GetZoneState(_ context.Context, request *common.GetZoneStateRequest) (*common.ZoneState, error) { - nsState, logctx, report, err := s.checkAuth(request.Token, "GetZoneState", request.Zoneid) + nsState, logctx, report, version, err := s.checkAuth(request.Token, "GetZoneState", request.Zoneid) if err != nil { logctx.Warn(err) return nil, err @@ -261,12 +261,12 @@ func (s *server) GetZoneState(_ context.Context, request *common.GetZoneStateReq logctx = logctx.NewContext("zoneid", request.Zoneid) logctx.Info("GetZoneState") - res, err := s.getZoneState(nsState, logctx, request.Zoneid) + res, err := s.getZoneState(nsState, logctx, request.Zoneid, version) report(err) return res, err } -func (s *server) getZoneState(nsState *namespaceState, logctx logger.LogContext, zoneid string) (*common.ZoneState, error) { +func (s *server) getZoneState(nsState *namespaceState, logctx logger.LogContext, zoneid string, version int32) (*common.ZoneState, error) { hstate, zone, err := nsState.lockupZone(s.spinning, zoneid) if err != nil { return nil, err @@ -281,14 +281,14 @@ func (s *server) getZoneState(nsState *namespaceState, logctx logger.LogContext, if err != nil { return nil, err } - result := &common.ZoneState{DnsSets: conversion.MarshalDNSSets(state.GetDNSSets())} + result := &common.ZoneState{DnsSets: conversion.MarshalDNSSets(state.GetDNSSets(), version)} logctx.Infof("GetZoneState: %d DNSSets", len(result.GetDnsSets())) return result, nil } func (s *server) Execute(_ context.Context, request *common.ExecuteRequest) (*common.ExecuteResponse, error) { - nsState, logctx, report, err := s.checkAuth(request.Token, "Execute", request.Zoneid) + nsState, logctx, report, _, err := s.checkAuth(request.Token, "Execute", request.Zoneid) if err != nil { logctx.Warn(err) return nil, err diff --git a/pkg/server/remote/state.go b/pkg/server/remote/state.go index c05e9ae5c..e9e58097a 100644 --- a/pkg/server/remote/state.go +++ b/pkg/server/remote/state.go @@ -52,8 +52,9 @@ type handlerState struct { } type tokenState struct { - clientID string - validUntil time.Time + clientID string + validUntil time.Time + clientProtocolVersion int32 } func newNamespaceState(namespace string) *namespaceState { @@ -126,7 +127,7 @@ func (s *namespaceState) _refreshZones() { } } -func (s *namespaceState) getToken(token string) (string, error) { +func (s *namespaceState) getToken(token string) (string, int32, error) { s.lock.Lock() defer s.lock.Unlock() @@ -135,20 +136,21 @@ func (s *namespaceState) getToken(token string) (string, error) { if tstate != nil { delete(s.tokens, token) } - return "", fmt.Errorf("%s for namespace %s", common.InvalidToken, s.name) + return "", 0, fmt.Errorf("%s for namespace %s", common.InvalidToken, s.name) } - return tstate.clientID, nil + return tstate.clientID, tstate.clientProtocolVersion, nil } -func (s *namespaceState) 
generateAndAddToken(tokenTTL time.Duration, rnd, clientID, server string) string { +func (s *namespaceState) generateAndAddToken(tokenTTL time.Duration, rnd, clientID, server string, clientProtocolVersion int32) string { s.lock.Lock() defer s.lock.Unlock() validUntil := time.Now().Add(tokenTTL).UTC() token := fmt.Sprintf("%s|%s|%s|%s|%s", s.name, clientID, validUntil.Format(time.RFC3339), server, rnd) s.tokens[token] = &tokenState{ - clientID: clientID, - validUntil: validUntil, + clientID: clientID, + validUntil: validUntil, + clientProtocolVersion: clientProtocolVersion, } return token } From d452c9f6911e017fe4fa86994cb6a55d7214ccd5 Mon Sep 17 00:00:00 2001 From: Martin Weindel Date: Wed, 13 Jul 2022 15:52:21 +0200 Subject: [PATCH 3/7] adapt dnssources to deal with routing policy --- docs/aws-route53/README.md | 39 +++++++++ ...y-weighted.yaml => 41-entry-weighted.yaml} | 2 +- examples/51-ingress-weighted.yaml | 30 +++++++ examples/51-service-weighted.yaml | 21 +++++ .../crds/dns.gardener.cloud_dnsentries.yaml | 15 ++++ pkg/apis/dns/crds/zz_generated_crds.go | 15 ++++ pkg/apis/dns/v1alpha1/dnsentry.go | 3 + pkg/controller/source/dnsentry/handler.go | 20 +++-- pkg/controller/source/ingress/handler.go | 8 +- pkg/controller/source/service/handler.go | 3 +- pkg/dns/dnsset.go | 76 ---------------- pkg/dns/provider/changemodel.go | 4 +- pkg/dns/provider/state.go | 19 ++-- pkg/dns/recordsetname.go | 87 +++++++++++++++++++ pkg/dns/routingpolicy.go | 69 +++++++++++++++ pkg/dns/source/controller.go | 1 + pkg/dns/source/defaults.go | 3 +- pkg/dns/source/dnsinfo.go | 22 ++++- pkg/dns/source/interface.go | 32 ++++--- pkg/dns/source/reconciler.go | 69 ++++++++------- pkg/dns/utils/utils_entry.go | 17 ++++ 21 files changed, 402 insertions(+), 153 deletions(-) rename examples/{40-entry-weighted.yaml => 41-entry-weighted.yaml} (98%) create mode 100644 examples/51-ingress-weighted.yaml create mode 100644 examples/51-service-weighted.yaml create mode 100644 pkg/dns/recordsetname.go create mode 100644 pkg/dns/routingpolicy.go diff --git a/docs/aws-route53/README.md b/docs/aws-route53/README.md index 5640a1ee0..165865224 100644 --- a/docs/aws-route53/README.md +++ b/docs/aws-route53/README.md @@ -111,6 +111,45 @@ acting on the same domain names. Every record set needs a `SetIdentifier` which Weighted routing policy is supported for all record types, i.e. `A`, `AAAA`, `CNAME`, and `TXT`. All entries of the same domain name must have the same record type and TTL. +#### Annotating Ingress or Service Resources with Routing Policy + +To specify the routing policy, add an annotation `dns.gardener.cloud/routing-policy` +containing the routing policy section in JSON format to the `Ingress` or `Service` resource. +E.g. 
for an ingress resource: + +```yaml +apiVersion: networking.k8s.io/v1 +kind: Ingress +metadata: + annotations: + dns.gardener.cloud/dnsnames: '*' + # If you are delegating the DNS management to Gardener, uncomment the following line (see https://gardener.cloud/documentation/guides/administer_shoots/dns_names/) + #dns.gardener.cloud/class: garden + # If you are delegating the certificate management to Gardener, uncomment the following line (see https://gardener.cloud/documentation/guides/administer_shoots/x509_certificates/) + #cert.gardener.cloud/purpose: managed + # routing-policy annotation provides the `.spec.routingPolicy` section as JSON + # Note: Currently only supported for aws-route53 (see https://github.com/gardener/external-dns-management/tree/master/docs/aws-route53#weighted-routing-policy) + dns.gardener.cloud/routing-policy: '{"type": "weighted", "setIdentifier": "my-id", "parameters": {"weight": "10"}}' + name: test-ingress-weighted-routing-policy + namespace: default +spec: + rules: + - host: test.ingress.my-dns-domain.com + http: + paths: + - backend: + service: + name: my-service + port: + number: 9000 + path: / + pathType: Prefix + tls: + - hosts: + - test.ingress.my-dns-domain.com + #secretName: my-cert-secret-name +``` + #### Example for A/B testing You want to perform an A/B testing for a service using the domain name `my.service.example.com`. diff --git a/examples/40-entry-weighted.yaml b/examples/41-entry-weighted.yaml similarity index 98% rename from examples/40-entry-weighted.yaml rename to examples/41-entry-weighted.yaml index 6fe686faf..5d64b18fe 100644 --- a/examples/40-entry-weighted.yaml +++ b/examples/41-entry-weighted.yaml @@ -24,7 +24,7 @@ metadata: annotations: # If you are delegating the DNS management to Gardener Shoot DNS Service, uncomment the following line #dns.gardener.cloud/class: garden - name: instance-a + name: instance-b namespace: default spec: dnsName: "my.service.example.com" diff --git a/examples/51-ingress-weighted.yaml b/examples/51-ingress-weighted.yaml new file mode 100644 index 000000000..25c263e9b --- /dev/null +++ b/examples/51-ingress-weighted.yaml @@ -0,0 +1,30 @@ +apiVersion: networking.k8s.io/v1 +kind: Ingress +metadata: + annotations: + dns.gardener.cloud/dnsnames: '*' + # If you are delegating the DNS management to Gardener, uncomment the following line (see https://gardener.cloud/documentation/guides/administer_shoots/dns_names/) + #dns.gardener.cloud/class: garden + # If you are delegating the certificate management to Gardener, uncomment the following line (see https://gardener.cloud/documentation/guides/administer_shoots/x509_certificates/) + #cert.gardener.cloud/purpose: managed + # routing-policy annotation provides the `.spec.routingPolicy` section as JSON + # Note: Currently only supported for aws-route53 (see https://github.com/gardener/external-dns-management/tree/master/docs/aws-route53#weighted-routing-policy) + dns.gardener.cloud/routing-policy: '{"type": "weighted", "setIdentifier": "my-id", "parameters": {"weight": "10"}}' + name: test-ingress-weighted-routing-policy + namespace: default +spec: + rules: + - host: test.ingress.my-dns-domain.com + http: + paths: + - backend: + service: + name: my-service + port: + number: 9000 + path: / + pathType: Prefix + tls: + - hosts: + - test.ingress.my-dns-domain.com + #secretName: my-cert-secret-name diff --git a/examples/51-service-weighted.yaml b/examples/51-service-weighted.yaml new file mode 100644 index 000000000..8cc025791 --- /dev/null +++ 
b/examples/51-service-weighted.yaml @@ -0,0 +1,21 @@ +apiVersion: v1 +kind: Service +metadata: + annotations: + dns.gardener.cloud/dnsnames: echo.my-dns-domain.com + dns.gardener.cloud/ttl: "500" + # If you are delegating the DNS Management to Gardener, uncomment the following line (see https://gardener.cloud/documentation/guides/administer_shoots/dns_names/) + #dns.gardener.cloud/class: garden + # routing-policy annotation provides the `.spec.routingPolicy` section as JSON + # Note: Currently only supported for aws-route53 (see https://github.com/gardener/external-dns-management/tree/master/docs/aws-route53#weighted-routing-policy) + dns.gardener.cloud/routing-policy: '{"type": "weighted", "setIdentifier": "my-id", "parameters": {"weight": "10"}}' + name: test-service-weighted + namespace: default +spec: + ports: + - name: http + port: 80 + protocol: TCP + targetPort: 8080 + sessionAffinity: None + type: LoadBalancer diff --git a/pkg/apis/dns/crds/dns.gardener.cloud_dnsentries.yaml b/pkg/apis/dns/crds/dns.gardener.cloud_dnsentries.yaml index 83db71029..56affc4b1 100644 --- a/pkg/apis/dns/crds/dns.gardener.cloud_dnsentries.yaml +++ b/pkg/apis/dns/crds/dns.gardener.cloud_dnsentries.yaml @@ -56,6 +56,21 @@ spec: name: ZONE priority: 2000 type: string + - description: routing policy type + jsonPath: .status.routingPolicy.type + name: POLICY_TYPE + priority: 2000 + type: string + - description: routing policy set identifier + jsonPath: .status.routingPolicy.setIdentifier + name: POLICY_SETID + priority: 2000 + type: string + - description: routing policy parameters + jsonPath: .status.routingPolicy.parameters + name: POLICY_PARAMS + priority: 2000 + type: string - description: message describing the reason for the state jsonPath: .status.message name: MESSAGE diff --git a/pkg/apis/dns/crds/zz_generated_crds.go b/pkg/apis/dns/crds/zz_generated_crds.go index b606e07c5..cc5c62e98 100644 --- a/pkg/apis/dns/crds/zz_generated_crds.go +++ b/pkg/apis/dns/crds/zz_generated_crds.go @@ -194,6 +194,21 @@ spec: name: ZONE priority: 2000 type: string + - description: routing policy type + jsonPath: .status.routingPolicy.type + name: POLICY_TYPE + priority: 2000 + type: string + - description: routing policy set identifier + jsonPath: .status.routingPolicy.setIdentifier + name: POLICY_SETID + priority: 2000 + type: string + - description: routing policy parameters + jsonPath: .status.routingPolicy.parameters + name: POLICY_PARAMS + priority: 2000 + type: string - description: message describing the reason for the state jsonPath: .status.message name: MESSAGE diff --git a/pkg/apis/dns/v1alpha1/dnsentry.go b/pkg/apis/dns/v1alpha1/dnsentry.go index 5d9e2be61..2ef51fe76 100644 --- a/pkg/apis/dns/v1alpha1/dnsentry.go +++ b/pkg/apis/dns/v1alpha1/dnsentry.go @@ -43,6 +43,9 @@ type DNSEntryList struct { // +kubebuilder:printcolumn:name=OWNERID,JSONPath=".spec.ownerId",type=string,description="owner id used to tag entries in external DNS system" // +kubebuilder:printcolumn:name=TTL,JSONPath=".status.ttl",type=integer,priority=2000,description="time to live" // +kubebuilder:printcolumn:name=ZONE,JSONPath=".status.zone",type=string,priority=2000,description="zone id" +// +kubebuilder:printcolumn:name=POLICY_TYPE,JSONPath=".status.routingPolicy.type",type=string,priority=2000,description="routing policy type" +// +kubebuilder:printcolumn:name=POLICY_SETID,JSONPath=".status.routingPolicy.setIdentifier",type=string,priority=2000,description="routing policy set identifier" +// 
+kubebuilder:printcolumn:name=POLICY_PARAMS,JSONPath=".status.routingPolicy.parameters",type=string,priority=2000,description="routing policy parameters" // +kubebuilder:printcolumn:name=MESSAGE,JSONPath=".status.message",type=string,priority=2000,description="message describing the reason for the state" // +genclient // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object diff --git a/pkg/controller/source/dnsentry/handler.go b/pkg/controller/source/dnsentry/handler.go index f71ccd6c6..7f8b2530f 100644 --- a/pkg/controller/source/dnsentry/handler.go +++ b/pkg/controller/source/dnsentry/handler.go @@ -21,8 +21,9 @@ import ( "github.com/gardener/controller-manager-library/pkg/logger" "github.com/gardener/controller-manager-library/pkg/resources" "github.com/gardener/controller-manager-library/pkg/utils" - + "github.com/gardener/external-dns-management/pkg/dns" "github.com/gardener/external-dns-management/pkg/dns/source" + dnsutils "github.com/gardener/external-dns-management/pkg/dns/utils" api "github.com/gardener/external-dns-management/pkg/apis/dns/v1alpha1" ) @@ -54,15 +55,18 @@ func (this *DNSEntrySource) CreateDNSFeedback(obj resources.Object) source.DNSFe } func (this *DNSEntrySource) GetDNSInfo(logger logger.LogContext, obj resources.Object, current *source.DNSCurrentState) (*source.DNSInfo, error) { - data := obj.Data().(*api.DNSEntry) + entryObject := dnsutils.DNSEntry(obj) + name := entryObject.RecordSetName() + data := entryObject.DNSEntry() info := &source.DNSInfo{ - Names: utils.NewStringSet(data.Spec.DNSName), - Targets: utils.NewStringSetByArray(data.Spec.Targets), - Text: utils.NewStringSetByArray(data.Spec.Text), - OrigRef: data.Spec.Reference, - TTL: data.Spec.TTL, - Interval: data.Spec.CNameLookupInterval, + Names: dns.NewRecordNameSet(name), + Targets: utils.NewStringSetByArray(data.Spec.Targets), + Text: utils.NewStringSetByArray(data.Spec.Text), + OrigRef: data.Spec.Reference, + TTL: data.Spec.TTL, + Interval: data.Spec.CNameLookupInterval, + RoutingPolicy: data.Spec.RoutingPolicy, } return info, nil } diff --git a/pkg/controller/source/ingress/handler.go b/pkg/controller/source/ingress/handler.go index 6dc7c7184..06011f8fe 100644 --- a/pkg/controller/source/ingress/handler.go +++ b/pkg/controller/source/ingress/handler.go @@ -23,6 +23,7 @@ import ( "github.com/gardener/controller-manager-library/pkg/logger" "github.com/gardener/controller-manager-library/pkg/resources" "github.com/gardener/controller-manager-library/pkg/utils" + "github.com/gardener/external-dns-management/pkg/dns" "github.com/gardener/external-dns-management/pkg/dns/source" networkingv1 "k8s.io/api/networking/v1" networkingv1beta1 "k8s.io/api/networking/v1beta1" @@ -42,19 +43,20 @@ func (this *IngressSource) GetDNSInfo(logger logger.LogContext, obj resources.Ob if err != nil { return nil, err } - info.Names = utils.StringSet{} + names := utils.StringSet{} all := current.AnnotatedNames.Contains("all") || current.AnnotatedNames.Contains("*") for _, host := range hosts { if host != "" && (all || current.AnnotatedNames.Contains(host)) { - info.Names.Add(host) + names.Add(host) } } - _, del := current.AnnotatedNames.DiffFrom(info.Names) + _, del := current.AnnotatedNames.DiffFrom(names) del.Remove("all") del.Remove("*") if len(del) > 0 { return info, fmt.Errorf("annotated dns names %s not declared by ingress", del) } + info.Names = dns.NewRecordSetNameSetFromStringSet(names, current.SetIdentifier()) return info, nil } diff --git a/pkg/controller/source/service/handler.go 
b/pkg/controller/source/service/handler.go index 1903f43fe..dcf38a999 100644 --- a/pkg/controller/source/service/handler.go +++ b/pkg/controller/source/service/handler.go @@ -22,13 +22,14 @@ import ( "github.com/gardener/controller-manager-library/pkg/logger" "github.com/gardener/controller-manager-library/pkg/resources" "github.com/gardener/controller-manager-library/pkg/utils" + "github.com/gardener/external-dns-management/pkg/dns" api "k8s.io/api/core/v1" ) // FakeTargetIP provides target for testing without load balancer var FakeTargetIP *string -func GetTargets(logger logger.LogContext, obj resources.Object, names utils.StringSet) (utils.StringSet, utils.StringSet, error) { +func GetTargets(logger logger.LogContext, obj resources.Object, names dns.RecordSetNameSet) (utils.StringSet, utils.StringSet, error) { svc := obj.Data().(*api.Service) if svc.Spec.Type != api.ServiceTypeLoadBalancer { if len(names) == 0 { diff --git a/pkg/dns/dnsset.go b/pkg/dns/dnsset.go index d921afc8e..0c6ba345d 100644 --- a/pkg/dns/dnsset.go +++ b/pkg/dns/dnsset.go @@ -17,8 +17,6 @@ package dns import ( - "fmt" - "github.com/gardener/controller-manager-library/pkg/utils" api "github.com/gardener/external-dns-management/pkg/apis/dns/v1alpha1" @@ -122,80 +120,6 @@ const ( ATTR_LOCKID = "lockid" ) -type RecordSetName struct { - // domain name of the record - DNSName string - // optional set identifier (used for record with routing policy) - SetIdentifier string -} - -func (n RecordSetName) WithDNSName(dnsName string) RecordSetName { - return RecordSetName{DNSName: dnsName, SetIdentifier: n.SetIdentifier} -} - -func (n RecordSetName) String() string { - if n.SetIdentifier == "" { - return n.DNSName - } - return n.DNSName + "#" + n.SetIdentifier -} - -func (n RecordSetName) Align() RecordSetName { - return n.WithDNSName(AlignHostname(n.DNSName)) -} - -func (n RecordSetName) Normalize() RecordSetName { - return n.WithDNSName(NormalizeHostname(n.DNSName)) -} - -const ( - RoutingPolicyWeighted = "weighted" -) - -type RoutingPolicy struct { - Type string - Parameters map[string]string -} - -func NewRoutingPolicy(typ string, keyvalues ...string) *RoutingPolicy { - policy := &RoutingPolicy{Type: typ, Parameters: map[string]string{}} - for i := 0; i < len(keyvalues)-1; i += 2 { - policy.Parameters[keyvalues[i]] = keyvalues[i+1] - } - return policy -} - -func (p *RoutingPolicy) Clone() *RoutingPolicy { - if p == nil { - return nil - } - copy := &RoutingPolicy{Type: p.Type, Parameters: map[string]string{}} - for k, v := range p.Parameters { - copy.Parameters[k] = v - } - return copy -} - -func (p *RoutingPolicy) CheckParameterKeys(keys []string) error { - for _, k := range keys { - if _, ok := p.Parameters[k]; !ok { - return fmt.Errorf("Missing parameter key %s", k) - } - } - if len(keys) != len(p.Parameters) { - outer: - for k := range p.Parameters { - for _, k2 := range keys { - if k == k2 { - continue outer - } - } - return fmt.Errorf("Unsupported parameter key %s", k) - } - } - return nil -} - type DNSSet struct { Name RecordSetName Kind string diff --git a/pkg/dns/provider/changemodel.go b/pkg/dns/provider/changemodel.go index 4ffbfe9fc..d25facbe5 100644 --- a/pkg/dns/provider/changemodel.go +++ b/pkg/dns/provider/changemodel.go @@ -195,7 +195,7 @@ type ChangeModel struct { dangling *ChangeGroup providergroups map[string]*ChangeGroup zonestate DNSZoneState - failedDNSNames RecordSetNameSet + failedDNSNames dns.RecordSetNameSet } type ChangeResult struct { @@ -212,7 +212,7 @@ func NewChangeModel(logger 
logger.LogContext, ownership dns.Ownership, req *zone context: req, applied: map[dns.RecordSetName]*dns.DNSSet{}, providergroups: map[string]*ChangeGroup{}, - failedDNSNames: RecordSetNameSet{}, + failedDNSNames: dns.RecordSetNameSet{}, } } diff --git a/pkg/dns/provider/state.go b/pkg/dns/provider/state.go index 9b15bfb5d..95d7427ba 100644 --- a/pkg/dns/provider/state.go +++ b/pkg/dns/provider/state.go @@ -51,22 +51,11 @@ func (z ZonedRecordSetName) String() string { type RecordSetNames map[ZonedRecordSetName]*Entry -type RecordSetNameSet map[dns.RecordSetName]struct{} - -func (s RecordSetNameSet) Add(name dns.RecordSetName) { - s[name] = struct{}{} -} - -func (s RecordSetNameSet) Contains(name dns.RecordSetName) bool { - _, ok := s[name] - return ok -} - type zoneReconciliation struct { zone *dnsHostedZone providers DNSProviders entries Entries - equivEntries RecordSetNameSet + equivEntries dns.RecordSetNameSet ownership dns.Ownership stale RecordSetNames dedicated bool @@ -479,14 +468,16 @@ func (this *state) GetEntriesForZone(logger logger.LogContext, zoneid dns.ZoneID return entries, nil, false } -func (this *state) addEntriesForZone(logger logger.LogContext, entries Entries, stale RecordSetNames, zone DNSHostedZone) (Entries, RecordSetNameSet, RecordSetNames, bool) { +func (this *state) addEntriesForZone(logger logger.LogContext, entries Entries, stale RecordSetNames, + zone DNSHostedZone) (Entries, dns.RecordSetNameSet, RecordSetNames, bool) { + if entries == nil { entries = Entries{} } if stale == nil { stale = RecordSetNames{} } - equivEntries := RecordSetNameSet{} + equivEntries := dns.RecordSetNameSet{} deleting := true // TODO check domain := zone.Domain() // fallback if no forwarded domains are reported diff --git a/pkg/dns/recordsetname.go b/pkg/dns/recordsetname.go new file mode 100644 index 000000000..58ec50ff7 --- /dev/null +++ b/pkg/dns/recordsetname.go @@ -0,0 +1,87 @@ +/* + * Copyright 2019 SAP SE or an SAP affiliate company. All rights reserved. This file is licensed under the Apache Software License, v. 2 except as noted otherwise in the LICENSE file + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * + */ + +package dns + +import "github.com/gardener/controller-manager-library/pkg/utils" + +type RecordSetName struct { + // domain name of the record + DNSName string + // optional set identifier (used for record with routing policy) + SetIdentifier string +} + +func (n RecordSetName) WithDNSName(dnsName string) RecordSetName { + return RecordSetName{DNSName: dnsName, SetIdentifier: n.SetIdentifier} +} + +func (n RecordSetName) String() string { + if n.SetIdentifier == "" { + return n.DNSName + } + return n.DNSName + "#" + n.SetIdentifier +} + +func (n RecordSetName) Align() RecordSetName { + return n.WithDNSName(AlignHostname(n.DNSName)) +} + +func (n RecordSetName) Normalize() RecordSetName { + return n.WithDNSName(NormalizeHostname(n.DNSName)) +} + +type RecordSetNameSet map[RecordSetName]struct{} + +func NewRecordNameSet(names ...RecordSetName) RecordSetNameSet { + set := RecordSetNameSet{} + set.AddAll(names...) + return set +} + +func (s RecordSetNameSet) AddAll(names ...RecordSetName) { + for _, name := range names { + s.Add(name) + } +} + +func (s RecordSetNameSet) Add(name RecordSetName) { + s[name] = struct{}{} +} + +func (s RecordSetNameSet) Contains(name RecordSetName) bool { + _, ok := s[name] + return ok +} + +func (s RecordSetNameSet) IsEmpty() bool { + return len(s) == 0 +} + +func (s RecordSetNameSet) Remove(name RecordSetName) { + delete(s, name) +} + +func NewRecordSetNameSetFromStringSet(dnsNames utils.StringSet, setIdentifier string) RecordSetNameSet { + set := RecordSetNameSet{} + for dnsname := range dnsNames { + set.Add(RecordSetName{ + DNSName: dnsname, + SetIdentifier: setIdentifier, + }) + } + return set +} diff --git a/pkg/dns/routingpolicy.go b/pkg/dns/routingpolicy.go new file mode 100644 index 000000000..4eac06991 --- /dev/null +++ b/pkg/dns/routingpolicy.go @@ -0,0 +1,69 @@ +/* + * Copyright 2019 SAP SE or an SAP affiliate company. All rights reserved. This file is licensed under the Apache Software License, v. 2 except as noted otherwise in the LICENSE file + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * + */ + +package dns + +import ( + "fmt" +) + +const ( + RoutingPolicyWeighted = "weighted" +) + +type RoutingPolicy struct { + Type string + Parameters map[string]string +} + +func NewRoutingPolicy(typ string, keyvalues ...string) *RoutingPolicy { + policy := &RoutingPolicy{Type: typ, Parameters: map[string]string{}} + for i := 0; i < len(keyvalues)-1; i += 2 { + policy.Parameters[keyvalues[i]] = keyvalues[i+1] + } + return policy +} + +func (p *RoutingPolicy) Clone() *RoutingPolicy { + if p == nil { + return nil + } + copy := &RoutingPolicy{Type: p.Type, Parameters: map[string]string{}} + for k, v := range p.Parameters { + copy.Parameters[k] = v + } + return copy +} + +func (p *RoutingPolicy) CheckParameterKeys(keys []string) error { + for _, k := range keys { + if _, ok := p.Parameters[k]; !ok { + return fmt.Errorf("Missing parameter key %s", k) + } + } + if len(keys) != len(p.Parameters) { + outer: + for k := range p.Parameters { + for _, k2 := range keys { + if k == k2 { + continue outer + } + } + return fmt.Errorf("Unsupported parameter key %s", k) + } + } + return nil +} diff --git a/pkg/dns/source/controller.go b/pkg/dns/source/controller.go index cc6f5a623..3885bd291 100644 --- a/pkg/dns/source/controller.go +++ b/pkg/dns/source/controller.go @@ -42,6 +42,7 @@ const TARGET_CLUSTER = "target" const DNS_ANNOTATION = dns.ANNOTATION_GROUP + "/dnsnames" const TTL_ANNOTATION = dns.ANNOTATION_GROUP + "/ttl" const PERIOD_ANNOTATION = dns.ANNOTATION_GROUP + "/cname-lookup-interval" +const ROUTING_POLICY_ANNOTATION = dns.ANNOTATION_GROUP + "/routing-policy" const CLASS_ANNOTATION = dns.CLASS_ANNOTATION const OPT_CLASS = "dns-class" diff --git a/pkg/dns/source/defaults.go b/pkg/dns/source/defaults.go index 554668e04..81373a0ab 100644 --- a/pkg/dns/source/defaults.go +++ b/pkg/dns/source/defaults.go @@ -20,6 +20,7 @@ import ( "sync" "github.com/gardener/controller-manager-library/pkg/controllermanager/controller" + "github.com/gardener/external-dns-management/pkg/dns" "k8s.io/apimachinery/pkg/runtime/schema" "github.com/gardener/controller-manager-library/pkg/controllermanager/controller/reconcile" @@ -92,7 +93,7 @@ func (this *DefaultDNSSource) CreateDNSFeedback(obj resources.Object) DNSFeedbac func (this *DefaultDNSSource) GetDNSInfo(logger logger.LogContext, obj resources.Object, current *DNSCurrentState) (*DNSInfo, error) { info := &DNSInfo{} - info.Names = current.AnnotatedNames + info.Names = dns.NewRecordSetNameSetFromStringSet(current.AnnotatedNames, current.SetIdentifier()) tgts, txts, err := this.handler(logger, obj, info.Names) info.Targets = tgts info.Text = txts diff --git a/pkg/dns/source/dnsinfo.go b/pkg/dns/source/dnsinfo.go index 92982bc8d..939924d76 100644 --- a/pkg/dns/source/dnsinfo.go +++ b/pkg/dns/source/dnsinfo.go @@ -17,6 +17,7 @@ package source import ( + "encoding/json" "fmt" "strconv" "strings" @@ -24,18 +25,20 @@ import ( "github.com/gardener/controller-manager-library/pkg/logger" "github.com/gardener/controller-manager-library/pkg/resources" "github.com/gardener/controller-manager-library/pkg/utils" + "github.com/gardener/external-dns-management/pkg/apis/dns/v1alpha1" + "github.com/gardener/external-dns-management/pkg/dns" ) -func (this *sourceReconciler) exclude(dns string) bool { - if this.excluded.Contains(dns) { +func (this *sourceReconciler) exclude(name dns.RecordSetName) bool { + if this.excluded.Contains(name.DNSName) { return true } for d := range this.excluded { if 
strings.HasPrefix(d, "*.") { d = d[2:] - i := strings.Index(dns, ".") + i := strings.Index(name.DNSName, ".") if i >= 0 { - if d == dns[i+1:] { + if d == name.DNSName[i+1:] { return true } } @@ -54,6 +57,14 @@ func (this *sourceReconciler) getDNSInfo(logger logger.LogContext, obj resources annos := obj.GetAnnotations() current.AnnotatedNames = utils.StringSet{} current.AnnotatedNames.AddAllSplittedSelected(annos[DNS_ANNOTATION], utils.StandardNonEmptyStringElement) + current.AnnotatedRoutingPolicy = nil + if a := annos[ROUTING_POLICY_ANNOTATION]; a != "" { + policy := &v1alpha1.RoutingPolicy{} + if err := json.Unmarshal([]byte(a), policy); err != nil { + return nil, true, err + } + current.AnnotatedRoutingPolicy = policy + } info, err := s.GetDNSInfo(logger, obj, current) if info != nil && info.Names != nil { @@ -93,6 +104,9 @@ func (this *sourceReconciler) getDNSInfo(logger logger.LogContext, obj resources } } } + if info.RoutingPolicy == nil { + info.RoutingPolicy = current.AnnotatedRoutingPolicy + } return info, true, nil } diff --git a/pkg/dns/source/interface.go b/pkg/dns/source/interface.go index 7bd999443..f7f63cf96 100644 --- a/pkg/dns/source/interface.go +++ b/pkg/dns/source/interface.go @@ -22,6 +22,7 @@ import ( "github.com/gardener/controller-manager-library/pkg/logger" "github.com/gardener/controller-manager-library/pkg/resources" "github.com/gardener/controller-manager-library/pkg/utils" + "github.com/gardener/external-dns-management/pkg/dns" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime/schema" @@ -29,13 +30,14 @@ import ( ) type DNSInfo struct { - Names utils.StringSet - TTL *int64 - Interval *int64 - Targets utils.StringSet - Text utils.StringSet - OrigRef *v1alpha1.EntryReference - TargetRef *v1alpha1.EntryReference + Names dns.RecordSetNameSet + TTL *int64 + Interval *int64 + Targets utils.StringSet + Text utils.StringSet + OrigRef *v1alpha1.EntryReference + TargetRef *v1alpha1.EntryReference + RoutingPolicy *v1alpha1.RoutingPolicy } type DNSFeedback interface { @@ -64,7 +66,7 @@ type DNSSourceType interface { Create(controller.Interface) (DNSSource, error) } -type DNSTargetExtractor func(logger logger.LogContext, obj resources.Object, names utils.StringSet) (utils.StringSet, utils.StringSet, error) +type DNSTargetExtractor func(logger logger.LogContext, obj resources.Object, names dns.RecordSetNameSet) (targets utils.StringSet, texts utils.StringSet, err error) type DNSSourceCreator func(controller.Interface) (DNSSource, error) type DNSState struct { @@ -73,9 +75,17 @@ type DNSState struct { } type DNSCurrentState struct { - Names map[string]*DNSState - Targets utils.StringSet - AnnotatedNames utils.StringSet + Names map[dns.RecordSetName]*DNSState + Targets utils.StringSet + AnnotatedNames utils.StringSet + AnnotatedRoutingPolicy *v1alpha1.RoutingPolicy +} + +func (s *DNSCurrentState) SetIdentifier() string { + if s.AnnotatedRoutingPolicy == nil { + return "" + } + return s.AnnotatedRoutingPolicy.SetIdentifier } func NewDNSSouceTypeForExtractor(name string, kind schema.GroupKind, handler DNSTargetExtractor) DNSSourceType { diff --git a/pkg/dns/source/reconciler.go b/pkg/dns/source/reconciler.go index f785ec33d..762a3bca8 100644 --- a/pkg/dns/source/reconciler.go +++ b/pkg/dns/source/reconciler.go @@ -18,6 +18,7 @@ package source import ( "fmt" + "reflect" "strings" "time" @@ -139,14 +140,13 @@ func (this *sourceReconciler) Setup() error { func (this *sourceReconciler) Reconcile(logger logger.LogContext, obj resources.Object) 
reconcile.Status { slaves := this.LookupSlaves(obj.ClusterKey()) - names := utils.StringSet{} + names := dns.RecordSetNameSet{} for _, s := range slaves { - e := dnsutils.DNSEntry(s).DNSEntry() - names.Add(e.Spec.DNSName) + names.Add(dnsutils.DNSEntry(s).RecordSetName()) } - found := &DNSCurrentState{Names: map[string]*DNSState{}, Targets: utils.StringSet{}} + found := &DNSCurrentState{Names: map[dns.RecordSetName]*DNSState{}, Targets: utils.StringSet{}} for n := range names { - s := this.AssertSingleSlave(logger, obj.ClusterKey(), slaves, dns.DNSNameMatcher(n)) + s := this.AssertSingleSlave(logger, obj.ClusterKey(), slaves, dnsutils.RecordSetNameMatcher(n)) e := dnsutils.DNSEntry(s).DNSEntry() found.Names[n] = &DNSState{DNSEntryStatus: e.Status, CreationTimestamp: e.CreationTimestamp} found.Targets.AddAll(e.Spec.Targets) @@ -186,9 +186,9 @@ func (this *sourceReconciler) Reconcile(logger logger.LogContext, obj resources. return reconcile.Delay(logger, err) } } - missing := utils.StringSet{} + missing := dns.RecordSetNameSet{} obsolete := []resources.Object{} - obsolete_dns := utils.StringSet{} + obsolete_dns := dns.RecordSetNameSet{} current := []resources.Object{} @@ -205,35 +205,35 @@ func (this *sourceReconciler) Reconcile(logger logger.LogContext, obj resources. } logger.Debugf("found names: %s", info.Names) outer: - for dnsname := range info.Names { + for name := range info.Names { for _, s := range slaves { - found := dnsutils.DNSEntry(s).DNSEntry().Spec.DNSName - if found == dnsname { + slaveName := dnsutils.DNSEntry(s).RecordSetName() + if slaveName == name { continue outer } } - missing.Add(dnsname) + missing.Add(name) } for _, s := range slaves { - dnsname := dnsutils.DNSEntry(s).DNSEntry().Spec.DNSName - if !info.Names.Contains(dnsname) { + slaveName := dnsutils.DNSEntry(s).RecordSetName() + if !info.Names.Contains(slaveName) { obsolete = append(obsolete, s) - obsolete_dns.Add(dnsname) + obsolete_dns.Add(slaveName) } else { current = append(current, s) } } var notifiedErrors []string - modified := map[string]bool{} + modified := map[dns.RecordSetName]bool{} if len(missing) > 0 { if len(info.Targets) > 0 || len(info.Text) > 0 || info.OrigRef != nil { logger.Infof("found missing dns entries: %s", missing) - for dnsname := range missing { - err := this.createEntryFor(logger, obj, dnsname, info, feedback) + for name := range missing { + err := this.createEntryFor(logger, obj, name, info, feedback) if err != nil { - notifiedErrors = append(notifiedErrors, fmt.Sprintf("cannot create dns entry object for %s: %s ", dnsname, err)) + notifiedErrors = append(notifiedErrors, fmt.Sprintf("cannot create dns entry object for %s: %s ", name, err)) } } } else { @@ -243,21 +243,21 @@ outer: if len(obsolete_dns) > 0 { logger.Infof("found obsolete dns entries: %s", obsolete_dns) for _, o := range obsolete { - dnsname := dnsutils.DNSEntry(o).DNSEntry().Spec.DNSName - err := this.deleteEntry(logger, obj, o, dnsname, feedback) + name := dnsutils.DNSEntry(o).RecordSetName() + err := this.deleteEntry(logger, o, name, feedback) if err != nil { - notifiedErrors = append(notifiedErrors, fmt.Sprintf("cannot remove dns entry object %q(%s): %s", o.ClusterKey(), dnsname, err)) + notifiedErrors = append(notifiedErrors, fmt.Sprintf("cannot remove dns entry object %q(%s): %s", o.ClusterKey(), name, err)) } } } if len(current) > 0 { for _, o := range current { - dnsname := dnsutils.DNSEntry(o).DNSEntry().Spec.DNSName + name := dnsutils.DNSEntry(o).RecordSetName() mod, err := this.updateEntryFor(logger, obj, info, 
o) - modified[dnsname] = mod + modified[name] = mod if err != nil { - notifiedErrors = append(notifiedErrors, fmt.Sprintf("cannot update dns entry object %q(%s): %s", o.ClusterKey(), dnsname, err)) + notifiedErrors = append(notifiedErrors, fmt.Sprintf("cannot update dns entry object %q(%s): %s", o.ClusterKey(), name, err)) } } } @@ -285,7 +285,7 @@ outer: case api.STATE_READY: default: if s.CreationTimestamp.Time.Before(threshold) { - feedback.Pending(logger, n, "no dns controller running?", s) + feedback.Pending(logger, n.String(), "no dns controller running?", s) } } } @@ -410,7 +410,7 @@ func (this *sourceReconciler) mapRef(obj resources.Object, info *DNSInfo) { } } -func (this *sourceReconciler) createEntryFor(logger logger.LogContext, obj resources.Object, dnsname string, info *DNSInfo, feedback DNSFeedback) error { +func (this *sourceReconciler) createEntryFor(logger logger.LogContext, obj resources.Object, name dns.RecordSetName, info *DNSInfo, feedback DNSFeedback) error { entry := &api.DNSEntry{} entry.GenerateName = strings.ToLower(this.nameprefix + obj.GetName() + "-" + obj.GroupKind().Kind + "-") if !this.targetclasses.IsDefault() { @@ -428,7 +428,7 @@ func (this *sourceReconciler) createEntryFor(logger logger.LogContext, obj resou if this.state.ownerState.ownerId != "" { entry.Spec.OwnerId = &this.state.ownerState.ownerId } - entry.Spec.DNSName = dnsname + entry.Spec.DNSName = name.DNSName this.mapRef(obj, info) if info.TargetRef != nil { if info.OrigRef != nil { @@ -450,23 +450,24 @@ func (this *sourceReconciler) createEntryFor(logger logger.LogContext, obj resou entry.Namespace = this.namespace } entry.Spec.TTL = info.TTL + entry.Spec.RoutingPolicy = info.RoutingPolicy e, _ := this.SlaveResoures()[0].Wrap(entry) err := this.Slaves().CreateSlave(obj, e) if err != nil { if feedback != nil { - feedback.Failed(logger, dnsname, err, nil) + feedback.Failed(logger, name.String(), err, nil) } return err } if feedback != nil { - feedback.Created(logger, dnsname, e.ObjectName()) + feedback.Created(logger, name.String(), e.ObjectName()) } else { logger.Infof("created dns entry object %s", e.ObjectName()) } if feedback != nil { - feedback.Pending(logger, dnsname, "", nil) + feedback.Pending(logger, name.String(), "", nil) } return nil } @@ -511,6 +512,10 @@ func (this *sourceReconciler) updateEntryFor(logger logger.LogContext, obj resou } mod.AssureStringPtrPtr(&spec.OwnerId, p) mod.AssureInt64PtrPtr(&spec.TTL, info.TTL) + if !reflect.DeepEqual(spec.RoutingPolicy, info.RoutingPolicy) { + spec.RoutingPolicy = info.RoutingPolicy + mod.Modify(true) + } mod.AssureInt64PtrPtr(&spec.CNameLookupInterval, info.Interval) targets := info.Targets text := info.Text @@ -540,12 +545,12 @@ func (this *sourceReconciler) updateEntryFor(logger logger.LogContext, obj resou return slave.Modify(f) } -func (this *sourceReconciler) deleteEntry(logger logger.LogContext, obj resources.Object, e resources.Object, dnsname string, feedback DNSFeedback) error { +func (this *sourceReconciler) deleteEntry(logger logger.LogContext, e resources.Object, name dns.RecordSetName, feedback DNSFeedback) error { err := e.Delete() if err == nil { msg := fmt.Sprintf("deleted dns entry object %s", e.ObjectName()) if feedback != nil { - feedback.Deleted(logger, dnsname, msg) + feedback.Deleted(logger, name.String(), msg) } else { logger.Info(msg) } diff --git a/pkg/dns/utils/utils_entry.go b/pkg/dns/utils/utils_entry.go index dbadc95f1..3c9059191 100644 --- a/pkg/dns/utils/utils_entry.go +++ b/pkg/dns/utils/utils_entry.go @@ 
-139,3 +139,20 @@ func (this *DNSEntryObject) AcknowledgeRoutingPolicy(policy *dns.RoutingPolicy) func (this *DNSEntryObject) GetTargetSpec(p TargetProvider) TargetSpec { return BaseTargetSpec(this, p) } + +func (this *DNSEntryObject) RecordSetName() dns.RecordSetName { + setIdentifier := "" + if this.Spec().RoutingPolicy != nil { + setIdentifier = this.Spec().RoutingPolicy.SetIdentifier + } + return dns.RecordSetName{ + DNSName: this.GetDNSName(), + SetIdentifier: setIdentifier, + } +} + +func RecordSetNameMatcher(name dns.RecordSetName) resources.ObjectMatcher { + return func(o resources.Object) bool { + return DNSEntry(o).RecordSetName() == name + } +} From 5519661f9a90b11ae60bbf7b4c767b71daecca2b Mon Sep 17 00:00:00 2001 From: Martin Weindel Date: Thu, 14 Jul 2022 09:46:33 +0200 Subject: [PATCH 4/7] renamed RecordSetName to DNSSetName --- pkg/controller/provider/aws/aliastarget.go | 2 +- pkg/controller/provider/aws/execution.go | 10 +++--- pkg/controller/provider/aws/handler.go | 6 ++-- pkg/controller/provider/aws/routingpolicy.go | 2 +- .../provider/openstack/handler_test.go | 10 +++--- pkg/controller/source/dnsentry/handler.go | 4 +-- pkg/controller/source/ingress/handler.go | 2 +- pkg/controller/source/service/handler.go | 2 +- pkg/dns/dnsset.go | 14 ++++---- pkg/dns/{recordsetname.go => dnssetname.go} | 34 +++++++++---------- pkg/dns/mapping.go | 4 +-- pkg/dns/mapping_test.go | 6 ++-- pkg/dns/provider/changemodel.go | 30 ++++++++-------- pkg/dns/provider/dedicatedrecord.go | 20 +++++------ pkg/dns/provider/entry.go | 10 +++--- pkg/dns/provider/errors/errors.go | 2 +- pkg/dns/provider/inmemory.go | 2 +- pkg/dns/provider/raw/execution.go | 10 +++--- pkg/dns/provider/raw/records.go | 10 +++--- pkg/dns/provider/state.go | 32 ++++++++--------- pkg/dns/provider/state_entry.go | 6 ++-- pkg/dns/provider/state_provider.go | 2 +- pkg/dns/provider/state_zone.go | 10 +++--- pkg/dns/source/defaults.go | 2 +- pkg/dns/source/dnsinfo.go | 2 +- pkg/dns/source/interface.go | 6 ++-- pkg/dns/source/reconciler.go | 26 +++++++------- pkg/dns/utils/utils_entry.go | 8 ++--- pkg/server/remote/conversion/conversion.go | 14 ++++---- .../remote/conversion/conversion_test.go | 8 ++--- test/integration/testenv.go | 2 +- 31 files changed, 149 insertions(+), 149 deletions(-) rename pkg/dns/{recordsetname.go => dnssetname.go} (62%) diff --git a/pkg/controller/provider/aws/aliastarget.go b/pkg/controller/provider/aws/aliastarget.go index 862afb1c1..1e1749813 100644 --- a/pkg/controller/provider/aws/aliastarget.go +++ b/pkg/controller/provider/aws/aliastarget.go @@ -98,7 +98,7 @@ func buildRecordSetFromAliasTarget(r *route53.ResourceRecordSet) *dns.RecordSet return rs } -func buildResourceRecordSetForAliasTarget(name dns.RecordSetName, rset *dns.RecordSet) (*route53.ResourceRecordSet, error) { +func buildResourceRecordSetForAliasTarget(name dns.DNSSetName, rset *dns.RecordSet) (*route53.ResourceRecordSet, error) { target := dns.NormalizeHostname(rset.Records[0].Value) hostedZone := canonicalHostedZone(target) if hostedZone == "" { diff --git a/pkg/controller/provider/aws/execution.go b/pkg/controller/provider/aws/execution.go index 5e066a59d..7ff1c0af6 100644 --- a/pkg/controller/provider/aws/execution.go +++ b/pkg/controller/provider/aws/execution.go @@ -44,7 +44,7 @@ type Execution struct { rateLimiter flowcontrol.RateLimiter zone provider.DNSHostedZone - changes map[dns.RecordSetName][]*Change + changes map[dns.DNSSetName][]*Change batchSize int } @@ -54,12 +54,12 @@ func NewExecution(logger logger.LogContext, h 
*Handler, zone provider.DNSHostedZ r53: h.r53, rateLimiter: h.config.RateLimiter, zone: zone, - changes: map[dns.RecordSetName][]*Change{}, + changes: map[dns.DNSSetName][]*Change{}, batchSize: h.awsConfig.BatchSize, } } -func buildResourceRecordSet(name dns.RecordSetName, rset *dns.RecordSet) (*route53.ResourceRecordSet, error) { +func buildResourceRecordSet(name dns.DNSSetName, rset *dns.RecordSet) (*route53.ResourceRecordSet, error) { rrs := &route53.ResourceRecordSet{} rrs.Name = aws.String(name.DNSName) rrs.Type = aws.String(rset.Type) @@ -101,7 +101,7 @@ func (this *Execution) addChange(action string, req *provider.ChangeRequest, dns return nil } -func (this *Execution) addRawChange(name dns.RecordSetName, updateGroup string, change *route53.Change, done provider.DoneHandler) { +func (this *Execution) addRawChange(name dns.DNSSetName, updateGroup string, change *route53.Change, done provider.DoneHandler) { this.changes[name] = append(this.changes[name], &Change{Change: change, Done: done, UpdateGroup: updateGroup}) } @@ -258,7 +258,7 @@ func safeCompareInt64(a, b *int64) bool { return *a == *b } -func limitChangeSet(changesByName map[dns.RecordSetName][]*Change, max int) [][]*Change { +func limitChangeSet(changesByName map[dns.DNSSetName][]*Change, max int) [][]*Change { batches := [][]*Change{} updateChanges := map[string][]*Change{} diff --git a/pkg/controller/provider/aws/handler.go b/pkg/controller/provider/aws/handler.go index 572ae575c..4c8d660d3 100644 --- a/pkg/controller/provider/aws/handler.go +++ b/pkg/controller/provider/aws/handler.go @@ -218,7 +218,7 @@ func (h *Handler) getZoneState(zone provider.DNSHostedZone, cache provider.ZoneC } else { rs = buildRecordSet(r) } - dnssets.AddRecordSetFromProviderEx(dns.RecordSetName{DNSName: aws.StringValue(r.Name), SetIdentifier: aws.StringValue(r.SetIdentifier)}, rs) + dnssets.AddRecordSetFromProviderEx(dns.DNSSetName{DNSName: aws.StringValue(r.Name), SetIdentifier: aws.StringValue(r.SetIdentifier)}, rs) } } forwarded, err := h.handleRecordSets(zone, aggr) @@ -388,7 +388,7 @@ func (h *Handler) DeleteVPCAssociationAuthorization(hostedZoneId string, vpcId s return out, nil } -func (h *Handler) GetRecordSet(zone provider.DNSHostedZone, rsName dns.RecordSetName, recordType string) (provider.DedicatedRecordSet, error) { +func (h *Handler) GetRecordSet(zone provider.DNSHostedZone, rsName dns.DNSSetName, recordType string) (provider.DedicatedRecordSet, error) { name := rsName.Align() var recordIdentifier *string if rsName.SetIdentifier != "" { @@ -414,7 +414,7 @@ func (h *Handler) GetRecordSet(zone provider.DNSHostedZone, rsName dns.RecordSet } else { rs = buildRecordSet(r) } - rsName := dns.RecordSetName{DNSName: aws.StringValue(r.Name), SetIdentifier: aws.StringValue(r.SetIdentifier)} + rsName := dns.DNSSetName{DNSName: aws.StringValue(r.Name), SetIdentifier: aws.StringValue(r.SetIdentifier)} dnssets.AddRecordSetFromProviderEx(rsName, rs) } } diff --git a/pkg/controller/provider/aws/routingpolicy.go b/pkg/controller/provider/aws/routingpolicy.go index cd4a056a3..7d89fe7a2 100644 --- a/pkg/controller/provider/aws/routingpolicy.go +++ b/pkg/controller/provider/aws/routingpolicy.go @@ -25,7 +25,7 @@ import ( "github.com/gardener/external-dns-management/pkg/dns" ) -func addRoutingPolicy(rrset *route53.ResourceRecordSet, name dns.RecordSetName, routingPolicy *dns.RoutingPolicy) error { +func addRoutingPolicy(rrset *route53.ResourceRecordSet, name dns.DNSSetName, routingPolicy *dns.RoutingPolicy) error { if name.SetIdentifier == "" && 
routingPolicy == nil { return nil } diff --git a/pkg/controller/provider/openstack/handler_test.go b/pkg/controller/provider/openstack/handler_test.go index 4f89fa93b..9f1ae2cd7 100644 --- a/pkg/controller/provider/openstack/handler_test.go +++ b/pkg/controller/provider/openstack/handler_test.go @@ -323,9 +323,9 @@ func TestGetZoneStateAndExecuteRequests(t *testing.T) { } stdMeta := buildRecordSet("META", 600, "\"owner=test\"", "\"prefix=comment-\"") - sub1 := dns.RecordSetName{DNSName: "sub1.z1.test"} - sub2 := dns.RecordSetName{DNSName: "sub2.z1.test"} - sub3 := dns.RecordSetName{DNSName: "sub3.z1.test"} + sub1 := dns.DNSSetName{DNSName: "sub1.z1.test"} + sub2 := dns.DNSSetName{DNSName: "sub2.z1.test"} + sub3 := dns.DNSSetName{DNSName: "sub3.z1.test"} expectedDnssets := dns.DNSSets{ sub1: &dns.DNSSet{ Name: sub1, @@ -341,7 +341,7 @@ func TestGetZoneStateAndExecuteRequests(t *testing.T) { "META": stdMeta, }, }, - dns.RecordSetName{DNSName: "sub3.z1.test"}: &dns.DNSSet{ + dns.DNSSetName{DNSName: "sub3.z1.test"}: &dns.DNSSet{ Name: sub3, Sets: dns.RecordSets{ "TXT": buildRecordSet("TXT", 303, "foo", "bar"), @@ -355,7 +355,7 @@ func TestGetZoneStateAndExecuteRequests(t *testing.T) { Ω(actualDnssets).Should(Equal(expectedDnssets)) tlog := logger.New() - sub4 := dns.RecordSetName{DNSName: "sub4.z1.test"} + sub4 := dns.DNSSetName{DNSName: "sub4.z1.test"} reqs := []*provider.ChangeRequest{ { Action: provider.R_CREATE, diff --git a/pkg/controller/source/dnsentry/handler.go b/pkg/controller/source/dnsentry/handler.go index 7f8b2530f..66a3441b3 100644 --- a/pkg/controller/source/dnsentry/handler.go +++ b/pkg/controller/source/dnsentry/handler.go @@ -56,11 +56,11 @@ func (this *DNSEntrySource) CreateDNSFeedback(obj resources.Object) source.DNSFe func (this *DNSEntrySource) GetDNSInfo(logger logger.LogContext, obj resources.Object, current *source.DNSCurrentState) (*source.DNSInfo, error) { entryObject := dnsutils.DNSEntry(obj) - name := entryObject.RecordSetName() + name := entryObject.DNSSetName() data := entryObject.DNSEntry() info := &source.DNSInfo{ - Names: dns.NewRecordNameSet(name), + Names: dns.NewDNSNameSet(name), Targets: utils.NewStringSetByArray(data.Spec.Targets), Text: utils.NewStringSetByArray(data.Spec.Text), OrigRef: data.Spec.Reference, diff --git a/pkg/controller/source/ingress/handler.go b/pkg/controller/source/ingress/handler.go index 06011f8fe..f5e2d4fd7 100644 --- a/pkg/controller/source/ingress/handler.go +++ b/pkg/controller/source/ingress/handler.go @@ -56,7 +56,7 @@ func (this *IngressSource) GetDNSInfo(logger logger.LogContext, obj resources.Ob if len(del) > 0 { return info, fmt.Errorf("annotated dns names %s not declared by ingress", del) } - info.Names = dns.NewRecordSetNameSetFromStringSet(names, current.SetIdentifier()) + info.Names = dns.NewDNSNameSetFromStringSet(names, current.SetIdentifier()) return info, nil } diff --git a/pkg/controller/source/service/handler.go b/pkg/controller/source/service/handler.go index dcf38a999..35553842a 100644 --- a/pkg/controller/source/service/handler.go +++ b/pkg/controller/source/service/handler.go @@ -29,7 +29,7 @@ import ( // FakeTargetIP provides target for testing without load balancer var FakeTargetIP *string -func GetTargets(logger logger.LogContext, obj resources.Object, names dns.RecordSetNameSet) (utils.StringSet, utils.StringSet, error) { +func GetTargets(logger logger.LogContext, obj resources.Object, names dns.DNSNameSet) (utils.StringSet, utils.StringSet, error) { svc := obj.Data().(*api.Service) if svc.Spec.Type != 
api.ServiceTypeLoadBalancer { if len(names) == 0 { diff --git a/pkg/dns/dnsset.go b/pkg/dns/dnsset.go index 0c6ba345d..ff9886bd9 100644 --- a/pkg/dns/dnsset.go +++ b/pkg/dns/dnsset.go @@ -53,7 +53,7 @@ import ( // or writing a record set, respectively. The map the given set to // an effective set and dns name for the desired purpose. -type DNSSets map[RecordSetName]*DNSSet +type DNSSets map[DNSSetName]*DNSSet type Ownership interface { IsResponsibleFor(id string) bool @@ -61,17 +61,17 @@ type Ownership interface { } func (dnssets DNSSets) AddRecordSetFromProvider(dnsName string, rs *RecordSet) { - dnssets.AddRecordSetFromProviderEx(RecordSetName{DNSName: dnsName}, rs) + dnssets.AddRecordSetFromProviderEx(DNSSetName{DNSName: dnsName}, rs) } -func (dnssets DNSSets) AddRecordSetFromProviderEx(rsName RecordSetName, rs *RecordSet) { +func (dnssets DNSSets) AddRecordSetFromProviderEx(rsName DNSSetName, rs *RecordSet) { name := rsName.Normalize() name, rs = MapFromProvider(name, rs) dnssets.AddRecordSet(name, rs) } -func (dnssets DNSSets) AddRecordSet(name RecordSetName, rs *RecordSet) { +func (dnssets DNSSets) AddRecordSet(name DNSSetName, rs *RecordSet) { dnsset := dnssets[name] if dnsset == nil { dnsset = NewDNSSet(name) @@ -80,7 +80,7 @@ func (dnssets DNSSets) AddRecordSet(name RecordSetName, rs *RecordSet) { dnsset.Sets[rs.Type] = rs } -func (dnssets DNSSets) RemoveRecordSet(name RecordSetName, recordSetType string) { +func (dnssets DNSSets) RemoveRecordSet(name DNSSetName, recordSetType string) { dnsset := dnssets[name] if dnsset != nil { delete(dnsset.Sets, recordSetType) @@ -121,7 +121,7 @@ const ( ) type DNSSet struct { - Name RecordSetName + Name DNSSetName Kind string UpdateGroup string Sets RecordSets @@ -229,6 +229,6 @@ func (this *DNSSet) SetRecordSet(rtype string, ttl int64, routingPolicy *Routing this.Sets[rtype] = &RecordSet{Type: rtype, TTL: ttl, IgnoreTTL: false, RoutingPolicy: routingPolicy, Records: records} } -func NewDNSSet(name RecordSetName) *DNSSet { +func NewDNSSet(name DNSSetName) *DNSSet { return &DNSSet{Name: name, Sets: map[string]*RecordSet{}} } diff --git a/pkg/dns/recordsetname.go b/pkg/dns/dnssetname.go similarity index 62% rename from pkg/dns/recordsetname.go rename to pkg/dns/dnssetname.go index 58ec50ff7..4a2b9d744 100644 --- a/pkg/dns/recordsetname.go +++ b/pkg/dns/dnssetname.go @@ -18,67 +18,67 @@ package dns import "github.com/gardener/controller-manager-library/pkg/utils" -type RecordSetName struct { +type DNSSetName struct { // domain name of the record DNSName string // optional set identifier (used for record with routing policy) SetIdentifier string } -func (n RecordSetName) WithDNSName(dnsName string) RecordSetName { - return RecordSetName{DNSName: dnsName, SetIdentifier: n.SetIdentifier} +func (n DNSSetName) WithDNSName(dnsName string) DNSSetName { + return DNSSetName{DNSName: dnsName, SetIdentifier: n.SetIdentifier} } -func (n RecordSetName) String() string { +func (n DNSSetName) String() string { if n.SetIdentifier == "" { return n.DNSName } return n.DNSName + "#" + n.SetIdentifier } -func (n RecordSetName) Align() RecordSetName { +func (n DNSSetName) Align() DNSSetName { return n.WithDNSName(AlignHostname(n.DNSName)) } -func (n RecordSetName) Normalize() RecordSetName { +func (n DNSSetName) Normalize() DNSSetName { return n.WithDNSName(NormalizeHostname(n.DNSName)) } -type RecordSetNameSet map[RecordSetName]struct{} +type DNSNameSet map[DNSSetName]struct{} -func NewRecordNameSet(names ...RecordSetName) RecordSetNameSet { - set := 
RecordSetNameSet{} +func NewDNSNameSet(names ...DNSSetName) DNSNameSet { + set := DNSNameSet{} set.AddAll(names...) return set } -func (s RecordSetNameSet) AddAll(names ...RecordSetName) { +func (s DNSNameSet) AddAll(names ...DNSSetName) { for _, name := range names { s.Add(name) } } -func (s RecordSetNameSet) Add(name RecordSetName) { +func (s DNSNameSet) Add(name DNSSetName) { s[name] = struct{}{} } -func (s RecordSetNameSet) Contains(name RecordSetName) bool { +func (s DNSNameSet) Contains(name DNSSetName) bool { _, ok := s[name] return ok } -func (s RecordSetNameSet) IsEmpty() bool { +func (s DNSNameSet) IsEmpty() bool { return len(s) == 0 } -func (s RecordSetNameSet) Remove(name RecordSetName) { +func (s DNSNameSet) Remove(name DNSSetName) { delete(s, name) } -func NewRecordSetNameSetFromStringSet(dnsNames utils.StringSet, setIdentifier string) RecordSetNameSet { - set := RecordSetNameSet{} +func NewDNSNameSetFromStringSet(dnsNames utils.StringSet, setIdentifier string) DNSNameSet { + set := DNSNameSet{} for dnsname := range dnsNames { - set.Add(RecordSetName{ + set.Add(DNSSetName{ DNSName: dnsname, SetIdentifier: setIdentifier, }) diff --git a/pkg/dns/mapping.go b/pkg/dns/mapping.go index a31c425de..a7b15f27e 100644 --- a/pkg/dns/mapping.go +++ b/pkg/dns/mapping.go @@ -48,7 +48,7 @@ func MapToProvider(rtype string, dnsset *DNSSet, base string) (string, *RecordSe return rsName.DNSName, rs } -func MapToProviderEx(rtype string, dnsset *DNSSet, base string, policy *RoutingPolicy) (RecordSetName, *RecordSet) { +func MapToProviderEx(rtype string, dnsset *DNSSet, base string, policy *RoutingPolicy) (DNSSetName, *RecordSet) { dnsName := dnsset.Name.DNSName rs := dnsset.Sets[rtype] if rtype == RS_META { @@ -83,7 +83,7 @@ func CalcMetaRecordDomainNameForValidation(name string) string { return calcMetaRecordDomainName(name, TxtPrefix, "") } -func MapFromProvider(name RecordSetName, rs *RecordSet) (RecordSetName, *RecordSet) { +func MapFromProvider(name DNSSetName, rs *RecordSet) (DNSSetName, *RecordSet) { dns := name.DNSName if rs.Type == RS_TXT { prefix := rs.GetAttr(ATTR_PREFIX) diff --git a/pkg/dns/mapping_test.go b/pkg/dns/mapping_test.go index 4ec13a3b3..5717cde56 100644 --- a/pkg/dns/mapping_test.go +++ b/pkg/dns/mapping_test.go @@ -76,20 +76,20 @@ func TestMapToFromProvider(t *testing.T) { wantedRecords = append(inputRecords, &Record{"\"prefix=comment-\""}) } dnsset := DNSSet{ - Name: RecordSetName{DNSName: entry.domainName}, + Name: DNSSetName{DNSName: entry.domainName}, Sets: RecordSets{RS_META: &RecordSet{Type: RS_META, TTL: 600, Records: inputRecords}}, } actualName, actualRecordSet := MapToProviderEx(rtype, &dnsset, base, nil) - Ω(actualName).Should(Equal(RecordSetName{DNSName: entry.wantedName}), "Name should match") + Ω(actualName).Should(Equal(DNSSetName{DNSName: entry.wantedName}), "Name should match") Ω(actualRecordSet.Type).Should(Equal(RS_TXT), "Type mismatch") Ω(actualRecordSet.TTL).Should(Equal(int64(600)), "TTL mismatch") Ω(actualRecordSet.Records).Should(Equal(wantedRecords)) reversedName, reversedRecordSet := MapFromProvider(actualName, actualRecordSet) - Ω(reversedName).Should(Equal(RecordSetName{DNSName: entry.domainName}), "Reversed name should match") + Ω(reversedName).Should(Equal(DNSSetName{DNSName: entry.domainName}), "Reversed name should match") Ω(reversedRecordSet.Type).Should(Equal(RS_META), "Reversed RecordSet.Type should match") Ω(reversedRecordSet.TTL).Should(Equal(int64(600)), "TTL mismatch") Ω(reversedRecordSet.Records).Should(Equal(wantedRecords)) diff 
--git a/pkg/dns/provider/changemodel.go b/pkg/dns/provider/changemodel.go index d25facbe5..7e67847e4 100644 --- a/pkg/dns/provider/changemodel.go +++ b/pkg/dns/provider/changemodel.go @@ -111,7 +111,7 @@ func (this *ChangeGroup) cleanup(logger logger.LogContext, model *ChangeModel) b if model.ExistsInEquivalentZone(s.Name) { continue } - if e := model.IsStale(ZonedRecordSetName{ZoneID: model.ZoneId(), RecordSetName: s.Name}); e != nil { + if e := model.IsStale(ZonedDNSSetName{ZoneID: model.ZoneId(), DNSSetName: s.Name}); e != nil { if e.IsDeleting() { model.failedDNSNames.Add(s.Name) // preventing deletion of stale entry } @@ -191,11 +191,11 @@ type ChangeModel struct { config Config ownership dns.Ownership context *zoneReconciliation - applied map[dns.RecordSetName]*dns.DNSSet + applied map[dns.DNSSetName]*dns.DNSSet dangling *ChangeGroup providergroups map[string]*ChangeGroup zonestate DNSZoneState - failedDNSNames dns.RecordSetNameSet + failedDNSNames dns.DNSNameSet } type ChangeResult struct { @@ -210,17 +210,17 @@ func NewChangeModel(logger logger.LogContext, ownership dns.Ownership, req *zone config: config, ownership: ownership, context: req, - applied: map[dns.RecordSetName]*dns.DNSSet{}, + applied: map[dns.DNSSetName]*dns.DNSSet{}, providergroups: map[string]*ChangeGroup{}, - failedDNSNames: dns.RecordSetNameSet{}, + failedDNSNames: dns.DNSNameSet{}, } } -func (this *ChangeModel) IsStale(dns ZonedRecordSetName) *Entry { +func (this *ChangeModel) IsStale(dns ZonedDNSSetName) *Entry { return this.context.stale[dns] } -func (this *ChangeModel) ExistsInEquivalentZone(name dns.RecordSetName) bool { +func (this *ChangeModel) ExistsInEquivalentZone(name dns.DNSSetName) bool { return this.context.equivEntries != nil && this.context.equivEntries.Contains(name) } @@ -293,20 +293,20 @@ func (this *ChangeModel) Setup() error { return err } -func (this *ChangeModel) Check(name dns.RecordSetName, updateGroup string, createdAt time.Time, done DoneHandler, spec TargetSpec) ChangeResult { +func (this *ChangeModel) Check(name dns.DNSSetName, updateGroup string, createdAt time.Time, done DoneHandler, spec TargetSpec) ChangeResult { return this.Exec(false, false, name, updateGroup, createdAt, done, spec) } -func (this *ChangeModel) Apply(name dns.RecordSetName, updateGroup string, createdAt time.Time, done DoneHandler, spec TargetSpec) ChangeResult { +func (this *ChangeModel) Apply(name dns.DNSSetName, updateGroup string, createdAt time.Time, done DoneHandler, spec TargetSpec) ChangeResult { return this.Exec(true, false, name, updateGroup, createdAt, done, spec) } -func (this *ChangeModel) Delete(name dns.RecordSetName, updateGroup string, createdAt time.Time, done DoneHandler, spec TargetSpec) ChangeResult { +func (this *ChangeModel) Delete(name dns.DNSSetName, updateGroup string, createdAt time.Time, done DoneHandler, spec TargetSpec) ChangeResult { return this.Exec(true, true, name, updateGroup, createdAt, done, spec) } -func (this *ChangeModel) PseudoApply(name dns.RecordSetName) { +func (this *ChangeModel) PseudoApply(name dns.DNSSetName) { this.applied[name] = dns.NewDNSSet(name) } -func (this *ChangeModel) Exec(apply bool, delete bool, name dns.RecordSetName, updateGroup string, createdAt time.Time, done DoneHandler, spec TargetSpec) ChangeResult { +func (this *ChangeModel) Exec(apply bool, delete bool, name dns.DNSSetName, updateGroup string, createdAt time.Time, done DoneHandler, spec TargetSpec) ChangeResult { //this.Infof("%s: %v", name, targets) if len(spec.Targets()) == 0 && !delete { 
return ChangeResult{} @@ -445,11 +445,11 @@ func (this *ChangeModel) Update(logger logger.LogContext) error { return nil } -func (this *ChangeModel) IsFailed(name dns.RecordSetName) bool { +func (this *ChangeModel) IsFailed(name dns.DNSSetName) bool { return this.failedDNSNames.Contains(name) } -func (this *ChangeModel) wrappedDoneHandler(rsName dns.RecordSetName, done DoneHandler) DoneHandler { +func (this *ChangeModel) wrappedDoneHandler(rsName dns.DNSSetName, done DoneHandler) DoneHandler { return &changeModelDoneHandler{ changeModel: this, inner: done, @@ -463,7 +463,7 @@ func (this *ChangeModel) wrappedDoneHandler(rsName dns.RecordSetName, done DoneH type changeModelDoneHandler struct { changeModel *ChangeModel inner DoneHandler - rsName dns.RecordSetName + rsName dns.DNSSetName } func (this *changeModelDoneHandler) SetInvalid(err error) { diff --git a/pkg/dns/provider/dedicatedrecord.go b/pkg/dns/provider/dedicatedrecord.go index 28dd23ef3..5871ec896 100644 --- a/pkg/dns/provider/dedicatedrecord.go +++ b/pkg/dns/provider/dedicatedrecord.go @@ -26,7 +26,7 @@ import ( ) type DedicatedDNSAccess interface { - GetRecordSet(zone DNSHostedZone, rsName dns.RecordSetName, recordType string) (DedicatedRecordSet, error) + GetRecordSet(zone DNSHostedZone, rsName dns.DNSSetName, recordType string) (DedicatedRecordSet, error) CreateOrUpdateRecordSet(logger logger.LogContext, zone DNSHostedZone, old, new DedicatedRecordSet) error DeleteRecordSet(logger logger.LogContext, zone DNSHostedZone, rs DedicatedRecordSet) error } @@ -42,7 +42,7 @@ type DedicatedRecord interface { type DedicatedRecordSet []DedicatedRecord type dedicatedRecord struct { - dns.RecordSetName + dns.DNSSetName Type string TTL int Value string @@ -58,22 +58,22 @@ func (r *dedicatedRecord) GetDNSName() string { return r.DNSName } func (r *dedicatedRecord) GetTTL() int { return r.TTL } -func FromDedicatedRecordSet(setName dns.RecordSetName, rs *dns.RecordSet) DedicatedRecordSet { +func FromDedicatedRecordSet(setName dns.DNSSetName, rs *dns.RecordSet) DedicatedRecordSet { recordset := DedicatedRecordSet{} for _, r := range rs.Records { recordset = append(recordset, &dedicatedRecord{ - RecordSetName: setName, - Type: rs.Type, - TTL: int(rs.TTL), - Value: r.Value, + DNSSetName: setName, + Type: rs.Type, + TTL: int(rs.TTL), + Value: r.Value, }) } return recordset } -func ToDedicatedRecordset(rawrs DedicatedRecordSet) (dns.RecordSetName, *dns.RecordSet) { +func ToDedicatedRecordset(rawrs DedicatedRecordSet) (dns.DNSSetName, *dns.RecordSet) { if len(rawrs) == 0 { - return dns.RecordSetName{}, nil + return dns.DNSSetName{}, nil } dnsName := rawrs[0].GetDNSName() setIdentifier := rawrs[0].GetSetIdentifier() @@ -83,7 +83,7 @@ func ToDedicatedRecordset(rawrs DedicatedRecordSet) (dns.RecordSetName, *dns.Rec for _, r := range rawrs { records = append(records, &dns.Record{Value: r.GetValue()}) } - return dns.RecordSetName{DNSName: dnsName, SetIdentifier: setIdentifier}, dns.NewRecordSet(rtype, ttl, records) + return dns.DNSSetName{DNSName: dnsName, SetIdentifier: setIdentifier}, dns.NewRecordSet(rtype, ttl, records) } func (rs DedicatedRecordSet) GetAttr(name string) string { diff --git a/pkg/dns/provider/entry.go b/pkg/dns/provider/entry.go index dbea30bf6..1465c71bc 100644 --- a/pkg/dns/provider/entry.go +++ b/pkg/dns/provider/entry.go @@ -81,7 +81,7 @@ func (this *EntryPremise) NotifyChange(p *EntryPremise) string { type EntryVersion struct { object dnsutils.DNSSpecification providername resources.ObjectName - rsname dns.RecordSetName + rsname 
dns.DNSSetName targets Targets routingPolicy *dns.RoutingPolicy mappings map[string][]string @@ -99,7 +99,7 @@ type EntryVersion struct { func NewEntryVersion(object dnsutils.DNSSpecification, old *Entry) *EntryVersion { v := &EntryVersion{ object: object, - rsname: dns.RecordSetName{DNSName: object.GetDNSName(), SetIdentifier: object.GetSetIdentifier()}, + rsname: dns.DNSSetName{DNSName: object.GetDNSName(), SetIdentifier: object.GetSetIdentifier()}, targets: Targets{}, mappings: map[string][]string{}, } @@ -201,12 +201,12 @@ func (this *EntryVersion) SetIdentifier() string { return this.rsname.SetIdentifier } -func (this *EntryVersion) RecordSetName() dns.RecordSetName { +func (this *EntryVersion) DNSSetName() dns.DNSSetName { return this.rsname } -func (this *EntryVersion) ZonedDNSName() ZonedRecordSetName { - return ZonedRecordSetName{ZoneID: this.ZoneId(), RecordSetName: this.rsname} +func (this *EntryVersion) ZonedDNSName() ZonedDNSSetName { + return ZonedDNSSetName{ZoneID: this.ZoneId(), DNSSetName: this.rsname} } func (this *EntryVersion) Targets() Targets { diff --git a/pkg/dns/provider/errors/errors.go b/pkg/dns/provider/errors/errors.go index ec13cca2b..2e5eb9c0a 100644 --- a/pkg/dns/provider/errors/errors.go +++ b/pkg/dns/provider/errors/errors.go @@ -34,7 +34,7 @@ func (e *AlreadyBusyForEntry) Error() string { } type AlreadyBusyForOwner struct { - Name dns.RecordSetName + Name dns.DNSSetName EntryCreatedAt time.Time Owner string } diff --git a/pkg/dns/provider/inmemory.go b/pkg/dns/provider/inmemory.go index 3817a1b08..030f14990 100644 --- a/pkg/dns/provider/inmemory.go +++ b/pkg/dns/provider/inmemory.go @@ -121,7 +121,7 @@ func (m *InMemory) Apply(zoneID dns.ZoneID, request *ChangeRequest, metrics Metr return nil } -func buildRecordSet(req *ChangeRequest) (dns.RecordSetName, *dns.RecordSet) { +func buildRecordSet(req *ChangeRequest) (dns.DNSSetName, *dns.RecordSet) { var dnsset *dns.DNSSet switch req.Action { case R_CREATE, R_UPDATE: diff --git a/pkg/dns/provider/raw/execution.go b/pkg/dns/provider/raw/execution.go index 3d834ba76..46a0c497e 100644 --- a/pkg/dns/provider/raw/execution.go +++ b/pkg/dns/provider/raw/execution.go @@ -50,7 +50,7 @@ type Execution struct { updates RecordSet deletions RecordSet - results map[dns.RecordSetName]*result + results map[dns.DNSSetName]*result } func NewExecution(logger logger.LogContext, e Executor, state *ZoneState, zone provider.DNSHostedZone) *Execution { @@ -60,7 +60,7 @@ func NewExecution(logger logger.LogContext, e Executor, state *ZoneState, zone p zone: zone, state: state, domain: zone.Domain(), - results: map[dns.RecordSetName]*result{}, + results: map[dns.DNSSetName]*result{}, additions: RecordSet{}, updates: RecordSet{}, deletions: RecordSet{}, @@ -68,7 +68,7 @@ func NewExecution(logger logger.LogContext, e Executor, state *ZoneState, zone p } func (this *Execution) AddChange(req *provider.ChangeRequest) { - var name dns.RecordSetName + var name dns.DNSSetName var newset, oldset *dns.RecordSet if req.Addition != nil { @@ -116,7 +116,7 @@ func (this *Execution) AddChange(req *provider.ChangeRequest) { } } -func (this *Execution) add(name dns.RecordSetName, rset *dns.RecordSet, modonly bool, found *RecordSet, notfound *RecordSet) { +func (this *Execution) add(name dns.DNSSetName, rset *dns.RecordSet, modonly bool, found *RecordSet, notfound *RecordSet) { rtype := rset.Type for _, r := range rset.Records { old := this.state.GetRecord(name, rtype, r.Value) @@ -184,7 +184,7 @@ func (this *Execution) SubmitChanges() error { func 
(this *Execution) submit(f func(record Record, zone provider.DNSHostedZone) error, r Record) { err := f(r, this.zone) if err != nil { - res := this.results[dns.RecordSetName{DNSName: r.GetDNSName(), SetIdentifier: r.GetSetIdentifier()}] + res := this.results[dns.DNSSetName{DNSName: r.GetDNSName(), SetIdentifier: r.GetSetIdentifier()}] if res != nil { res.err = err this.Infof("operation failed for %s %s: %s", r.GetType(), r.GetDNSName(), err) diff --git a/pkg/dns/provider/raw/records.go b/pkg/dns/provider/raw/records.go index 6491f561a..648905eb7 100644 --- a/pkg/dns/provider/raw/records.go +++ b/pkg/dns/provider/raw/records.go @@ -57,13 +57,13 @@ func (this DNSSet) Clone() DNSSet { type ZoneState struct { dnssets dns.DNSSets - records map[dns.RecordSetName]DNSSet + records map[dns.DNSSetName]DNSSet } var _ provider.DNSZoneState = &ZoneState{} func NewState() *ZoneState { - return &ZoneState{records: map[dns.RecordSetName]DNSSet{}} + return &ZoneState{records: map[dns.DNSSetName]DNSSet{}} } func (this *ZoneState) GetDNSSets() dns.DNSSets { @@ -73,7 +73,7 @@ func (this *ZoneState) GetDNSSets() dns.DNSSets { func (this *ZoneState) Clone() provider.DNSZoneState { clone := NewState() clone.dnssets = this.dnssets.Clone() - clone.records = map[dns.RecordSetName]DNSSet{} + clone.records = map[dns.DNSSetName]DNSSet{} for k, v := range this.records { clone.records[k] = v.Clone() } @@ -82,7 +82,7 @@ func (this *ZoneState) Clone() provider.DNSZoneState { func (this *ZoneState) AddRecord(r Record) { if dns.SupportedRecordType(r.GetType()) { - name := dns.RecordSetName{DNSName: r.GetDNSName(), SetIdentifier: r.GetSetIdentifier()} + name := dns.DNSSetName{DNSName: r.GetDNSName(), SetIdentifier: r.GetSetIdentifier()} t := r.GetType() e := this.records[name] if e == nil { @@ -93,7 +93,7 @@ func (this *ZoneState) AddRecord(r Record) { } } -func (this *ZoneState) GetRecord(dnsname dns.RecordSetName, rtype, value string) Record { +func (this *ZoneState) GetRecord(dnsname dns.DNSSetName, rtype, value string) Record { e := this.records[dnsname] if e != nil { for _, r := range e[rtype] { diff --git a/pkg/dns/provider/state.go b/pkg/dns/provider/state.go index 95d7427ba..c25087396 100644 --- a/pkg/dns/provider/state.go +++ b/pkg/dns/provider/state.go @@ -40,24 +40,24 @@ import ( "github.com/gardener/external-dns-management/pkg/server/remote/embed" ) -type ZonedRecordSetName struct { - dns.RecordSetName +type ZonedDNSSetName struct { + dns.DNSSetName ZoneID dns.ZoneID } -func (z ZonedRecordSetName) String() string { - return fmt.Sprintf("%s[%s]", z.RecordSetName, z.ZoneID) +func (z ZonedDNSSetName) String() string { + return fmt.Sprintf("%s[%s]", z.DNSSetName, z.ZoneID) } -type RecordSetNames map[ZonedRecordSetName]*Entry +type ZonedDNSSetNames map[ZonedDNSSetName]*Entry type zoneReconciliation struct { zone *dnsHostedZone providers DNSProviders entries Entries - equivEntries dns.RecordSetNameSet + equivEntries dns.DNSNameSet ownership dns.Ownership - stale RecordSetNames + stale ZonedDNSSetNames dedicated bool deleting bool fhandler FinalizerHandler @@ -146,7 +146,7 @@ type state struct { providerRateLimiter map[resources.ObjectName]*rateLimiterData prlock sync.RWMutex - dnsnames RecordSetNames + dnsnames ZonedDNSSetNames references *References initialized bool @@ -200,7 +200,7 @@ func NewDNSState(ctx Context, ownerresc, secretresc resources.Interface, classes entries: Entries{}, outdated: newSynchronizedEntries(), blockingEntries: map[resources.ObjectName]time.Time{}, - dnsnames: map[ZonedRecordSetName]*Entry{}, 
+ dnsnames: map[ZonedDNSSetName]*Entry{}, references: NewReferenceCache(), providerRateLimiter: map[resources.ObjectName]*rateLimiterData{}, } @@ -456,28 +456,28 @@ func (this *state) GetZonesForProvider(name resources.ObjectName) dnsHostedZones return copyZones(this.providerzones[name]) } -func (this *state) GetEntriesForZone(logger logger.LogContext, zoneid dns.ZoneID) (Entries, RecordSetNames, bool) { +func (this *state) GetEntriesForZone(logger logger.LogContext, zoneid dns.ZoneID) (Entries, ZonedDNSSetNames, bool) { this.lock.RLock() defer this.lock.RUnlock() entries := Entries{} zone := this.zones[zoneid] if zone != nil { - entries, _, stale, deleting := this.addEntriesForZone(logger, entries, RecordSetNames{}, zone) + entries, _, stale, deleting := this.addEntriesForZone(logger, entries, ZonedDNSSetNames{}, zone) return entries, stale, deleting } return entries, nil, false } -func (this *state) addEntriesForZone(logger logger.LogContext, entries Entries, stale RecordSetNames, - zone DNSHostedZone) (Entries, dns.RecordSetNameSet, RecordSetNames, bool) { +func (this *state) addEntriesForZone(logger logger.LogContext, entries Entries, stale ZonedDNSSetNames, + zone DNSHostedZone) (Entries, dns.DNSNameSet, ZonedDNSSetNames, bool) { if entries == nil { entries = Entries{} } if stale == nil { - stale = RecordSetNames{} + stale = ZonedDNSSetNames{} } - equivEntries := dns.RecordSetNameSet{} + equivEntries := dns.DNSNameSet{} deleting := true // TODO check domain := zone.Domain() // fallback if no forwarded domains are reported @@ -511,7 +511,7 @@ func (this *state) addEntriesForZone(logger logger.LogContext, entries Entries, continue } else if !provider.IncludesZone(zone.Id()) { if provider.HasEquivalentZone(zone.Id()) && e.IsActive() && !forwarded(nested, dns.DNSName) { - equivEntries.Add(dns.RecordSetName) + equivEntries.Add(dns.DNSSetName) } continue } diff --git a/pkg/dns/provider/state_entry.go b/pkg/dns/provider/state_entry.go index 6c314578f..a44fcac32 100644 --- a/pkg/dns/provider/state_entry.go +++ b/pkg/dns/provider/state_entry.go @@ -411,9 +411,9 @@ func (this *state) checkAndUpdateLock(logger logger.LogContext, entry *Entry, pr target := dnsutils.NewText(s, newTTL) records = append(records, target.AsRecord()) } - newRS := FromDedicatedRecordSet(entry.RecordSetName(), dns.NewRecordSet(dns.RS_TXT, newTTL, records)) + newRS := FromDedicatedRecordSet(entry.DNSSetName(), dns.NewRecordSet(dns.RS_TXT, newTTL, records)) - rs, err := handler.GetRecordSet(zone, entry.RecordSetName(), dns.RS_TXT) + rs, err := handler.GetRecordSet(zone, entry.DNSSetName(), dns.RS_TXT) if err != nil { return reconcile.Delay(logger, err) } @@ -515,7 +515,7 @@ func (this *state) checkAndDeleteLock(logger logger.LogContext, entry *Entry, pr handler := premise.provider.GetDedicatedDNSAccess() zone := this.zones[entry.ZoneId()] - rs, err := handler.GetRecordSet(zone, entry.RecordSetName(), dns.RS_TXT) + rs, err := handler.GetRecordSet(zone, entry.DNSSetName(), dns.RS_TXT) if err != nil { return reconcile.Delay(logger, err) } diff --git a/pkg/dns/provider/state_provider.go b/pkg/dns/provider/state_provider.go index 7d4564872..6e409582f 100644 --- a/pkg/dns/provider/state_provider.go +++ b/pkg/dns/provider/state_provider.go @@ -86,7 +86,7 @@ func (this *state) _UpdateLocalProvider(logger logger.LogContext, obj *dnsutils. 
if last != nil { logger.Infof("trigger entries for old zones") entries := Entries{} - stale := RecordSetNames{} + stale := ZonedDNSSetNames{} for _, z := range last.zones { this.addEntriesForZone(logger, entries, stale, z) } diff --git a/pkg/dns/provider/state_zone.go b/pkg/dns/provider/state_zone.go index ea7e134a4..3e59928e3 100644 --- a/pkg/dns/provider/state_zone.go +++ b/pkg/dns/provider/state_zone.go @@ -196,14 +196,14 @@ func (this *state) reconcileZone(logger logger.LogContext, req *zoneReconciliati spec := e.object.GetTargetSpec(e) statusUpdate := NewStatusUpdate(logger, e, this.GetContext()) if e.IsDeleting() { - changeResult = changes.Delete(e.RecordSetName(), e.ObjectName().Namespace(), e.CreatedAt(), statusUpdate, spec) + changeResult = changes.Delete(e.DNSSetName(), e.ObjectName().Namespace(), e.CreatedAt(), statusUpdate, spec) } else { if !e.NotRateLimited() { - changeResult = changes.Check(e.RecordSetName(), e.ObjectName().Namespace(), e.CreatedAt(), statusUpdate, spec) + changeResult = changes.Check(e.DNSSetName(), e.ObjectName().Namespace(), e.CreatedAt(), statusUpdate, spec) if changeResult.Modified { if accepted, delay := this.tryAcceptProviderRateLimiter(logger, e); !accepted { req.zone.nextTrigger = delay - changes.PseudoApply(e.RecordSetName()) + changes.PseudoApply(e.DNSSetName()) logger.Infof("rate limited %s, delay %.1f s", e.ObjectName(), delay.Seconds()) statusUpdate.Throttled() if delay.Seconds() > 2 { @@ -213,7 +213,7 @@ func (this *state) reconcileZone(logger logger.LogContext, req *zoneReconciliati } } } - changeResult = changes.Apply(e.RecordSetName(), e.ObjectName().Namespace(), e.CreatedAt(), statusUpdate, spec) + changeResult = changes.Apply(e.DNSSetName(), e.ObjectName().Namespace(), e.CreatedAt(), statusUpdate, spec) if changeResult.Error != nil && changeResult.Retry { conflictErr = changeResult.Error } @@ -228,7 +228,7 @@ func (this *state) reconcileZone(logger logger.LogContext, req *zoneReconciliati outdatedEntries := EntryList{} this.outdated.AddActiveZoneTo(zoneid, &outdatedEntries) for _, e := range outdatedEntries { - if changes.IsFailed(e.RecordSetName()) { + if changes.IsFailed(e.DNSSetName()) { continue } logger.Infof("cleanup outdated entry %q", e.ObjectName()) diff --git a/pkg/dns/source/defaults.go b/pkg/dns/source/defaults.go index 81373a0ab..5d35c1d4c 100644 --- a/pkg/dns/source/defaults.go +++ b/pkg/dns/source/defaults.go @@ -93,7 +93,7 @@ func (this *DefaultDNSSource) CreateDNSFeedback(obj resources.Object) DNSFeedbac func (this *DefaultDNSSource) GetDNSInfo(logger logger.LogContext, obj resources.Object, current *DNSCurrentState) (*DNSInfo, error) { info := &DNSInfo{} - info.Names = dns.NewRecordSetNameSetFromStringSet(current.AnnotatedNames, current.SetIdentifier()) + info.Names = dns.NewDNSNameSetFromStringSet(current.AnnotatedNames, current.SetIdentifier()) tgts, txts, err := this.handler(logger, obj, info.Names) info.Targets = tgts info.Text = txts diff --git a/pkg/dns/source/dnsinfo.go b/pkg/dns/source/dnsinfo.go index 939924d76..c2f438ec8 100644 --- a/pkg/dns/source/dnsinfo.go +++ b/pkg/dns/source/dnsinfo.go @@ -29,7 +29,7 @@ import ( "github.com/gardener/external-dns-management/pkg/dns" ) -func (this *sourceReconciler) exclude(name dns.RecordSetName) bool { +func (this *sourceReconciler) exclude(name dns.DNSSetName) bool { if this.excluded.Contains(name.DNSName) { return true } diff --git a/pkg/dns/source/interface.go b/pkg/dns/source/interface.go index f7f63cf96..dea05c0b8 100644 --- a/pkg/dns/source/interface.go +++ 
b/pkg/dns/source/interface.go @@ -30,7 +30,7 @@ import ( ) type DNSInfo struct { - Names dns.RecordSetNameSet + Names dns.DNSNameSet TTL *int64 Interval *int64 Targets utils.StringSet @@ -66,7 +66,7 @@ type DNSSourceType interface { Create(controller.Interface) (DNSSource, error) } -type DNSTargetExtractor func(logger logger.LogContext, obj resources.Object, names dns.RecordSetNameSet) (targets utils.StringSet, texts utils.StringSet, err error) +type DNSTargetExtractor func(logger logger.LogContext, obj resources.Object, names dns.DNSNameSet) (targets utils.StringSet, texts utils.StringSet, err error) type DNSSourceCreator func(controller.Interface) (DNSSource, error) type DNSState struct { @@ -75,7 +75,7 @@ type DNSState struct { } type DNSCurrentState struct { - Names map[dns.RecordSetName]*DNSState + Names map[dns.DNSSetName]*DNSState Targets utils.StringSet AnnotatedNames utils.StringSet AnnotatedRoutingPolicy *v1alpha1.RoutingPolicy diff --git a/pkg/dns/source/reconciler.go b/pkg/dns/source/reconciler.go index 762a3bca8..5d5a6cd6b 100644 --- a/pkg/dns/source/reconciler.go +++ b/pkg/dns/source/reconciler.go @@ -140,13 +140,13 @@ func (this *sourceReconciler) Setup() error { func (this *sourceReconciler) Reconcile(logger logger.LogContext, obj resources.Object) reconcile.Status { slaves := this.LookupSlaves(obj.ClusterKey()) - names := dns.RecordSetNameSet{} + names := dns.DNSNameSet{} for _, s := range slaves { - names.Add(dnsutils.DNSEntry(s).RecordSetName()) + names.Add(dnsutils.DNSEntry(s).DNSSetName()) } - found := &DNSCurrentState{Names: map[dns.RecordSetName]*DNSState{}, Targets: utils.StringSet{}} + found := &DNSCurrentState{Names: map[dns.DNSSetName]*DNSState{}, Targets: utils.StringSet{}} for n := range names { - s := this.AssertSingleSlave(logger, obj.ClusterKey(), slaves, dnsutils.RecordSetNameMatcher(n)) + s := this.AssertSingleSlave(logger, obj.ClusterKey(), slaves, dnsutils.DNSSetNameMatcher(n)) e := dnsutils.DNSEntry(s).DNSEntry() found.Names[n] = &DNSState{DNSEntryStatus: e.Status, CreationTimestamp: e.CreationTimestamp} found.Targets.AddAll(e.Spec.Targets) @@ -186,9 +186,9 @@ func (this *sourceReconciler) Reconcile(logger logger.LogContext, obj resources. return reconcile.Delay(logger, err) } } - missing := dns.RecordSetNameSet{} + missing := dns.DNSNameSet{} obsolete := []resources.Object{} - obsolete_dns := dns.RecordSetNameSet{} + obsolete_dns := dns.DNSNameSet{} current := []resources.Object{} @@ -207,7 +207,7 @@ func (this *sourceReconciler) Reconcile(logger logger.LogContext, obj resources. 
outer: for name := range info.Names { for _, s := range slaves { - slaveName := dnsutils.DNSEntry(s).RecordSetName() + slaveName := dnsutils.DNSEntry(s).DNSSetName() if slaveName == name { continue outer } @@ -216,7 +216,7 @@ outer: } for _, s := range slaves { - slaveName := dnsutils.DNSEntry(s).RecordSetName() + slaveName := dnsutils.DNSEntry(s).DNSSetName() if !info.Names.Contains(slaveName) { obsolete = append(obsolete, s) obsolete_dns.Add(slaveName) @@ -226,7 +226,7 @@ outer: } var notifiedErrors []string - modified := map[dns.RecordSetName]bool{} + modified := map[dns.DNSSetName]bool{} if len(missing) > 0 { if len(info.Targets) > 0 || len(info.Text) > 0 || info.OrigRef != nil { logger.Infof("found missing dns entries: %s", missing) @@ -243,7 +243,7 @@ outer: if len(obsolete_dns) > 0 { logger.Infof("found obsolete dns entries: %s", obsolete_dns) for _, o := range obsolete { - name := dnsutils.DNSEntry(o).RecordSetName() + name := dnsutils.DNSEntry(o).DNSSetName() err := this.deleteEntry(logger, o, name, feedback) if err != nil { notifiedErrors = append(notifiedErrors, fmt.Sprintf("cannot remove dns entry object %q(%s): %s", o.ClusterKey(), name, err)) @@ -253,7 +253,7 @@ outer: } if len(current) > 0 { for _, o := range current { - name := dnsutils.DNSEntry(o).RecordSetName() + name := dnsutils.DNSEntry(o).DNSSetName() mod, err := this.updateEntryFor(logger, obj, info, o) modified[name] = mod if err != nil { @@ -410,7 +410,7 @@ func (this *sourceReconciler) mapRef(obj resources.Object, info *DNSInfo) { } } -func (this *sourceReconciler) createEntryFor(logger logger.LogContext, obj resources.Object, name dns.RecordSetName, info *DNSInfo, feedback DNSFeedback) error { +func (this *sourceReconciler) createEntryFor(logger logger.LogContext, obj resources.Object, name dns.DNSSetName, info *DNSInfo, feedback DNSFeedback) error { entry := &api.DNSEntry{} entry.GenerateName = strings.ToLower(this.nameprefix + obj.GetName() + "-" + obj.GroupKind().Kind + "-") if !this.targetclasses.IsDefault() { @@ -545,7 +545,7 @@ func (this *sourceReconciler) updateEntryFor(logger logger.LogContext, obj resou return slave.Modify(f) } -func (this *sourceReconciler) deleteEntry(logger logger.LogContext, e resources.Object, name dns.RecordSetName, feedback DNSFeedback) error { +func (this *sourceReconciler) deleteEntry(logger logger.LogContext, e resources.Object, name dns.DNSSetName, feedback DNSFeedback) error { err := e.Delete() if err == nil { msg := fmt.Sprintf("deleted dns entry object %s", e.ObjectName()) diff --git a/pkg/dns/utils/utils_entry.go b/pkg/dns/utils/utils_entry.go index 3c9059191..a3a9e98df 100644 --- a/pkg/dns/utils/utils_entry.go +++ b/pkg/dns/utils/utils_entry.go @@ -140,19 +140,19 @@ func (this *DNSEntryObject) GetTargetSpec(p TargetProvider) TargetSpec { return BaseTargetSpec(this, p) } -func (this *DNSEntryObject) RecordSetName() dns.RecordSetName { +func (this *DNSEntryObject) DNSSetName() dns.DNSSetName { setIdentifier := "" if this.Spec().RoutingPolicy != nil { setIdentifier = this.Spec().RoutingPolicy.SetIdentifier } - return dns.RecordSetName{ + return dns.DNSSetName{ DNSName: this.GetDNSName(), SetIdentifier: setIdentifier, } } -func RecordSetNameMatcher(name dns.RecordSetName) resources.ObjectMatcher { +func DNSSetNameMatcher(name dns.DNSSetName) resources.ObjectMatcher { return func(o resources.Object) bool { - return DNSEntry(o).RecordSetName() == name + return DNSEntry(o).DNSSetName() == name } } diff --git a/pkg/server/remote/conversion/conversion.go 
b/pkg/server/remote/conversion/conversion.go index 9849be3e3..a7f598947 100644 --- a/pkg/server/remote/conversion/conversion.go +++ b/pkg/server/remote/conversion/conversion.go @@ -30,26 +30,26 @@ func MarshalDNSSets(local dns.DNSSets, protocolVersion int32) common.DNSSets { for name, dnsset := range local { if name.SetIdentifier == "" || protocolVersion == common.ProtocolVersion1 { // don't return recordsets with routing policy for protocol version 0 - result[marshalRecordSetName(name)] = MarshalDNSSet(dnsset) + result[marshalDNSSetName(name)] = MarshalDNSSet(dnsset) } } return result } -func marshalRecordSetName(name dns.RecordSetName) string { +func marshalDNSSetName(name dns.DNSSetName) string { if name.SetIdentifier == "" { return name.DNSName } return name.DNSName + "\t" + name.SetIdentifier } -func unmarshalRecordSetName(marshalledName string) dns.RecordSetName { +func unmarshalDNSSetName(marshalledName string) dns.DNSSetName { parts := strings.Split(marshalledName, "\t") setIdentifier := "" if len(parts) == 2 { setIdentifier = parts[1] } - return dns.RecordSetName{DNSName: parts[0], SetIdentifier: setIdentifier} + return dns.DNSSetName{DNSName: parts[0], SetIdentifier: setIdentifier} } func MarshalDNSSet(local *dns.DNSSet) *common.DNSSet { @@ -104,13 +104,13 @@ func MarshalPartialDNSSet(local *dns.DNSSet, recordType string) *common.PartialD func UnmarshalDNSSets(remote common.DNSSets) dns.DNSSets { local := dns.DNSSets{} for name, set := range remote { - local[unmarshalRecordSetName(name)] = UnmarshalDNSSet(set) + local[unmarshalDNSSetName(name)] = UnmarshalDNSSet(set) } return local } func UnmarshalDNSSet(remote *common.DNSSet) *dns.DNSSet { - local := dns.NewDNSSet(dns.RecordSetName{DNSName: remote.DnsName, SetIdentifier: remote.SetIdentifier}) + local := dns.NewDNSSet(dns.DNSSetName{DNSName: remote.DnsName, SetIdentifier: remote.SetIdentifier}) local.UpdateGroup = remote.UpdateGroup for typ, rs := range remote.Records { @@ -143,7 +143,7 @@ func UnmarshalRoutingPolicy(policy *common.RecordSet_RoutingPolicy) *dns.Routing } func UnmarshalPartialDNSSet(remote *common.PartialDNSSet) *dns.DNSSet { - local := dns.NewDNSSet(dns.RecordSetName{DNSName: remote.DnsName, SetIdentifier: remote.SetIdentifier}) + local := dns.NewDNSSet(dns.DNSSetName{DNSName: remote.DnsName, SetIdentifier: remote.SetIdentifier}) local.UpdateGroup = remote.UpdateGroup local.Sets[remote.RecordType] = UnmarshalRecordSet(remote.RecordSet) diff --git a/pkg/server/remote/conversion/conversion_test.go b/pkg/server/remote/conversion/conversion_test.go index b7e7dcedf..4d5b3378c 100644 --- a/pkg/server/remote/conversion/conversion_test.go +++ b/pkg/server/remote/conversion/conversion_test.go @@ -38,9 +38,9 @@ func TestMarshalDNSSets(t *testing.T) { Type: "weighted", Parameters: map[string]string{"weight": "2"}, } - sets1.AddRecordSet(dns.RecordSetName{DNSName: "b.a"}, rsb) - sets1.AddRecordSet(dns.RecordSetName{DNSName: "c.a", SetIdentifier: "id1"}, rsc1) - sets1.AddRecordSet(dns.RecordSetName{DNSName: "c.a", SetIdentifier: "id2"}, rsc2) + sets1.AddRecordSet(dns.DNSSetName{DNSName: "b.a"}, rsb) + sets1.AddRecordSet(dns.DNSSetName{DNSName: "c.a", SetIdentifier: "id1"}, rsc1) + sets1.AddRecordSet(dns.DNSSetName{DNSName: "c.a", SetIdentifier: "id2"}, rsc2) table := []struct { name string sets dns.DNSSets @@ -86,7 +86,7 @@ func doTestMarshalChangeRequest(t *testing.T, withPolicy bool) { Parameters: map[string]string{"weight": "100"}, } } - set := dns.NewDNSSet(dns.RecordSetName{DNSName: "b.a", SetIdentifier: setIdentifier}) 
+ set := dns.NewDNSSet(dns.DNSSetName{DNSName: "b.a", SetIdentifier: setIdentifier}) set.UpdateGroup = "group1" set.SetMetaAttr(dns.ATTR_OWNER, "owner1", routingPolicy) set.SetMetaAttr(dns.ATTR_PREFIX, "comment-", routingPolicy) diff --git a/test/integration/testenv.go b/test/integration/testenv.go index d28b990cc..51822baac 100644 --- a/test/integration/testenv.go +++ b/test/integration/testenv.go @@ -792,7 +792,7 @@ func (te *TestEnv) MockInMemoryGetDNSSetEx(name, zonePrefix, dnsName string) (*d if err != nil { return nil, err } - if set := state.GetDNSSets()[dns.RecordSetName{DNSName: dnsName}]; set != nil { + if set := state.GetDNSSets()[dns.DNSSetName{DNSName: dnsName}]; set != nil { return set, nil } } From 9322222559adbd803c22df972681734368605d81 Mon Sep 17 00:00:00 2001 From: Martin Weindel Date: Thu, 14 Jul 2022 15:58:38 +0200 Subject: [PATCH 5/7] added tests for routing policies updated kubebuilder testenv --- .gitignore | 1 + go.mod | 1 + go.sum | 7 + pkg/controller/source/service/handler.go | 6 - test/functional/basics.go | 6 +- test/functional/config/config.go | 35 +- test/functional/functest-config-template.yaml | 17 + test/functional/routingpolicies.go | 176 +++++ test/functional/run.sh | 58 +- test/integration/ingressAnnotation_test.go | 78 ++ test/integration/run.sh | 64 +- test/integration/serviceAnnotation_test.go | 34 +- test/integration/suite_test.go | 53 +- test/integration/testenv.go | 84 ++- .../controller-manager-library/hack/run-in.sh | 0 .../client/clientset/clientset/clientset.go | 134 ++++ .../pkg/client/clientset/clientset/doc.go | 20 + .../client/clientset/clientset/scheme/doc.go | 20 + .../clientset/clientset/scheme/register.go | 58 ++ .../apiextensions/v1/apiextensions_client.go | 107 +++ .../v1/customresourcedefinition.go | 184 +++++ .../clientset/typed/apiextensions/v1/doc.go | 20 + .../apiextensions/v1/generated_expansion.go | 21 + .../v1beta1/apiextensions_client.go | 107 +++ .../v1beta1/customresourcedefinition.go | 184 +++++ .../typed/apiextensions/v1beta1/doc.go | 20 + .../v1beta1/generated_expansion.go | 21 + .../apis/meta/internalversion/scheme/doc.go | 17 + .../meta/internalversion/scheme/register.go | 39 + vendor/k8s.io/client-go/metadata/interface.go | 49 ++ vendor/k8s.io/client-go/metadata/metadata.go | 331 +++++++++ vendor/k8s.io/client-go/util/retry/OWNERS | 4 + vendor/k8s.io/client-go/util/retry/util.go | 105 +++ vendor/modules.txt | 23 + vendor/sigs.k8s.io/controller-runtime/LICENSE | 201 +++++ .../pkg/client/apiutil/apimachinery.go | 196 +++++ .../pkg/client/apiutil/dynamicrestmapper.go | 285 +++++++ .../controller-runtime/pkg/client/client.go | 328 +++++++++ .../pkg/client/client_cache.go | 150 ++++ .../controller-runtime/pkg/client/codec.go | 40 + .../pkg/client/config/config.go | 157 ++++ .../pkg/client/config/doc.go | 18 + .../controller-runtime/pkg/client/doc.go | 49 ++ .../controller-runtime/pkg/client/dryrun.go | 106 +++ .../pkg/client/interfaces.go | 145 ++++ .../pkg/client/metadata_client.go | 195 +++++ .../pkg/client/namespaced_client.go | 213 ++++++ .../controller-runtime/pkg/client/object.go | 77 ++ .../controller-runtime/pkg/client/options.go | 697 ++++++++++++++++++ .../controller-runtime/pkg/client/patch.go | 213 ++++++ .../controller-runtime/pkg/client/split.go | 141 ++++ .../pkg/client/typed_client.go | 205 ++++++ .../pkg/client/unstructured_client.go | 277 +++++++ .../controller-runtime/pkg/client/watch.go | 118 +++ .../pkg/conversion/conversion.go | 40 + .../controller-runtime/pkg/envtest/crd.go | 451 ++++++++++++ 
.../controller-runtime/pkg/envtest/doc.go | 26 + .../controller-runtime/pkg/envtest/helper.go | 69 ++ .../controller-runtime/pkg/envtest/server.go | 375 ++++++++++ .../controller-runtime/pkg/envtest/webhook.go | 428 +++++++++++ .../pkg/internal/flock/doc.go | 21 + .../pkg/internal/flock/errors.go | 24 + .../pkg/internal/flock/flock_other.go | 24 + .../pkg/internal/flock/flock_unix.go | 47 ++ .../pkg/internal/log/log.go | 32 + .../pkg/internal/objectutil/objectutil.go | 78 ++ .../pkg/internal/testing/addr/manager.go | 126 ++++ .../pkg/internal/testing/certs/tinyca.go | 224 ++++++ .../testing/controlplane/apiserver.go | 469 ++++++++++++ .../pkg/internal/testing/controlplane/auth.go | 142 ++++ .../pkg/internal/testing/controlplane/etcd.go | 202 +++++ .../internal/testing/controlplane/kubectl.go | 119 +++ .../internal/testing/controlplane/plane.go | 248 +++++++ .../pkg/internal/testing/process/arguments.go | 340 +++++++++ .../testing/process/bin_path_finder.go | 70 ++ .../pkg/internal/testing/process/process.go | 277 +++++++ .../controller-runtime/pkg/log/deleg.go | 188 +++++ .../controller-runtime/pkg/log/log.go | 102 +++ .../controller-runtime/pkg/log/null.go | 59 ++ .../pkg/log/warning_handler.go | 76 ++ .../pkg/webhook/conversion/conversion.go | 345 +++++++++ .../pkg/webhook/conversion/decoder.go | 47 ++ 82 files changed, 10389 insertions(+), 155 deletions(-) create mode 100644 test/functional/routingpolicies.go create mode 100644 test/integration/ingressAnnotation_test.go mode change 100755 => 100644 vendor/github.com/gardener/controller-manager-library/hack/run-in.sh create mode 100644 vendor/k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset/clientset.go create mode 100644 vendor/k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset/doc.go create mode 100644 vendor/k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset/scheme/doc.go create mode 100644 vendor/k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset/scheme/register.go create mode 100644 vendor/k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset/typed/apiextensions/v1/apiextensions_client.go create mode 100644 vendor/k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset/typed/apiextensions/v1/customresourcedefinition.go create mode 100644 vendor/k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset/typed/apiextensions/v1/doc.go create mode 100644 vendor/k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset/typed/apiextensions/v1/generated_expansion.go create mode 100644 vendor/k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset/typed/apiextensions/v1beta1/apiextensions_client.go create mode 100644 vendor/k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset/typed/apiextensions/v1beta1/customresourcedefinition.go create mode 100644 vendor/k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset/typed/apiextensions/v1beta1/doc.go create mode 100644 vendor/k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset/typed/apiextensions/v1beta1/generated_expansion.go create mode 100644 vendor/k8s.io/apimachinery/pkg/apis/meta/internalversion/scheme/doc.go create mode 100644 vendor/k8s.io/apimachinery/pkg/apis/meta/internalversion/scheme/register.go create mode 100644 vendor/k8s.io/client-go/metadata/interface.go create mode 100644 vendor/k8s.io/client-go/metadata/metadata.go create mode 100644 vendor/k8s.io/client-go/util/retry/OWNERS create mode 100644 vendor/k8s.io/client-go/util/retry/util.go create mode 100644 
vendor/sigs.k8s.io/controller-runtime/LICENSE create mode 100644 vendor/sigs.k8s.io/controller-runtime/pkg/client/apiutil/apimachinery.go create mode 100644 vendor/sigs.k8s.io/controller-runtime/pkg/client/apiutil/dynamicrestmapper.go create mode 100644 vendor/sigs.k8s.io/controller-runtime/pkg/client/client.go create mode 100644 vendor/sigs.k8s.io/controller-runtime/pkg/client/client_cache.go create mode 100644 vendor/sigs.k8s.io/controller-runtime/pkg/client/codec.go create mode 100644 vendor/sigs.k8s.io/controller-runtime/pkg/client/config/config.go create mode 100644 vendor/sigs.k8s.io/controller-runtime/pkg/client/config/doc.go create mode 100644 vendor/sigs.k8s.io/controller-runtime/pkg/client/doc.go create mode 100644 vendor/sigs.k8s.io/controller-runtime/pkg/client/dryrun.go create mode 100644 vendor/sigs.k8s.io/controller-runtime/pkg/client/interfaces.go create mode 100644 vendor/sigs.k8s.io/controller-runtime/pkg/client/metadata_client.go create mode 100644 vendor/sigs.k8s.io/controller-runtime/pkg/client/namespaced_client.go create mode 100644 vendor/sigs.k8s.io/controller-runtime/pkg/client/object.go create mode 100644 vendor/sigs.k8s.io/controller-runtime/pkg/client/options.go create mode 100644 vendor/sigs.k8s.io/controller-runtime/pkg/client/patch.go create mode 100644 vendor/sigs.k8s.io/controller-runtime/pkg/client/split.go create mode 100644 vendor/sigs.k8s.io/controller-runtime/pkg/client/typed_client.go create mode 100644 vendor/sigs.k8s.io/controller-runtime/pkg/client/unstructured_client.go create mode 100644 vendor/sigs.k8s.io/controller-runtime/pkg/client/watch.go create mode 100644 vendor/sigs.k8s.io/controller-runtime/pkg/conversion/conversion.go create mode 100644 vendor/sigs.k8s.io/controller-runtime/pkg/envtest/crd.go create mode 100644 vendor/sigs.k8s.io/controller-runtime/pkg/envtest/doc.go create mode 100644 vendor/sigs.k8s.io/controller-runtime/pkg/envtest/helper.go create mode 100644 vendor/sigs.k8s.io/controller-runtime/pkg/envtest/server.go create mode 100644 vendor/sigs.k8s.io/controller-runtime/pkg/envtest/webhook.go create mode 100644 vendor/sigs.k8s.io/controller-runtime/pkg/internal/flock/doc.go create mode 100644 vendor/sigs.k8s.io/controller-runtime/pkg/internal/flock/errors.go create mode 100644 vendor/sigs.k8s.io/controller-runtime/pkg/internal/flock/flock_other.go create mode 100644 vendor/sigs.k8s.io/controller-runtime/pkg/internal/flock/flock_unix.go create mode 100644 vendor/sigs.k8s.io/controller-runtime/pkg/internal/log/log.go create mode 100644 vendor/sigs.k8s.io/controller-runtime/pkg/internal/objectutil/objectutil.go create mode 100644 vendor/sigs.k8s.io/controller-runtime/pkg/internal/testing/addr/manager.go create mode 100644 vendor/sigs.k8s.io/controller-runtime/pkg/internal/testing/certs/tinyca.go create mode 100644 vendor/sigs.k8s.io/controller-runtime/pkg/internal/testing/controlplane/apiserver.go create mode 100644 vendor/sigs.k8s.io/controller-runtime/pkg/internal/testing/controlplane/auth.go create mode 100644 vendor/sigs.k8s.io/controller-runtime/pkg/internal/testing/controlplane/etcd.go create mode 100644 vendor/sigs.k8s.io/controller-runtime/pkg/internal/testing/controlplane/kubectl.go create mode 100644 vendor/sigs.k8s.io/controller-runtime/pkg/internal/testing/controlplane/plane.go create mode 100644 vendor/sigs.k8s.io/controller-runtime/pkg/internal/testing/process/arguments.go create mode 100644 vendor/sigs.k8s.io/controller-runtime/pkg/internal/testing/process/bin_path_finder.go create mode 100644 
vendor/sigs.k8s.io/controller-runtime/pkg/internal/testing/process/process.go create mode 100644 vendor/sigs.k8s.io/controller-runtime/pkg/log/deleg.go create mode 100644 vendor/sigs.k8s.io/controller-runtime/pkg/log/log.go create mode 100644 vendor/sigs.k8s.io/controller-runtime/pkg/log/null.go create mode 100644 vendor/sigs.k8s.io/controller-runtime/pkg/log/warning_handler.go create mode 100644 vendor/sigs.k8s.io/controller-runtime/pkg/webhook/conversion/conversion.go create mode 100644 vendor/sigs.k8s.io/controller-runtime/pkg/webhook/conversion/decoder.go diff --git a/.gitignore b/.gitignore index 0d3d5cd6e..9ea1437da 100644 --- a/.gitignore +++ b/.gitignore @@ -13,6 +13,7 @@ tmp/ /cmd/dns/dns-controller-manager /main /test/functional/tmp-*.yaml +/test/functional/kubebuilder* /test/integration/kubebuilder* /test/integration/default.etcd /test/integration/integration.test diff --git a/go.mod b/go.mod index 9646dbba1..3983312ad 100644 --- a/go.mod +++ b/go.mod @@ -35,6 +35,7 @@ require ( k8s.io/client-go v0.24.1 k8s.io/code-generator v0.24.1 k8s.io/kube-openapi v0.0.0-20220603121420-31174f50af60 + sigs.k8s.io/controller-runtime v0.11.1 sigs.k8s.io/controller-tools v0.8.0 sigs.k8s.io/kind v0.11.1 ) diff --git a/go.sum b/go.sum index bcf28de2a..09a7d2b26 100644 --- a/go.sum +++ b/go.sum @@ -240,6 +240,7 @@ github.com/go-logr/logr v0.1.0/go.mod h1:ixOQHD9gLJUVQQ2ZOR7zLEifBX6tGkNJF4QyIY7 github.com/go-logr/logr v0.2.0/go.mod h1:z6/tIYblkpsD+a4lm/fGIIU9mZ+XfAiaFtq7xTgseGU= github.com/go-logr/logr v1.2.0 h1:QK40JKJyMdUDz+h+xvCsru/bJhvG0UxvePV0ufL/AcE= github.com/go-logr/logr v1.2.0/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= +github.com/go-logr/zapr v1.2.0 h1:n4JnPI1T3Qq1SFEi/F8rwLrZERp2bso19PJZDB9dayk= github.com/go-logr/zapr v1.2.0/go.mod h1:Qa4Bsj2Vb+FAVeAKsLD8RLQ+YRJB8YDmOAKxaBQf7Ro= github.com/go-openapi/analysis v0.0.0-20180825180245-b006789cd277/go.mod h1:k70tL6pCuVxPJOHXQ+wIac1FUrvNkHolPie/cLEU6hI= github.com/go-openapi/analysis v0.17.0/go.mod h1:IowGgpVeD0vNm45So8nr+IcQ3pxVtpRoBWb8PVZO0ik= @@ -821,10 +822,12 @@ go.uber.org/automaxprocs v1.4.0 h1:CpDZl6aOlLhReez+8S3eEotD7Jx0Os++lemPlMULQP0= go.uber.org/automaxprocs v1.4.0/go.mod h1:/mTEdr7LvHhs0v7mjdxDreTz1OG5zdZGqgOnhWiR/+Q= go.uber.org/goleak v1.1.10/go.mod h1:8a7PlsEVH3e/a/GLqe5IIrQx6GzcnRmZEufDUTk4A7A= go.uber.org/multierr v1.1.0/go.mod h1:wR5kodmAFQ0UK8QlbwjlSNy0Z68gJhDJUG5sjR94q/0= +go.uber.org/multierr v1.6.0 h1:y6IPFStTAIT5Ytl7/XYmHvzXQ7S3g/IeZW9hyZ5thw4= go.uber.org/multierr v1.6.0/go.mod h1:cdWPpRnG4AhwMwsgIHip0KRBQjJy5kYEpYjJxpXp9iU= go.uber.org/zap v1.10.0/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q= go.uber.org/zap v1.17.0/go.mod h1:MXVU+bhUf/A7Xi2HNOnopQOrmycQ5Ih87HtOu4q5SSo= go.uber.org/zap v1.19.0/go.mod h1:xg/QME4nWcxGxrpdeYfq7UvYrLh66cuVKdrbD1XF/NI= +go.uber.org/zap v1.19.1 h1:ue41HOKd1vGURxrmeKIgELGb3jPW9DMUDGtsinblHwI= golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20181029021203-45a5f77698d3/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= @@ -1157,6 +1160,7 @@ golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8T golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1 h1:go1bK/D/BFZV2I8cIQd1NKEZ+0owSTG1fDTci4IqFcE= golang.org/x/xerrors 
v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +gomodules.xyz/jsonpatch/v2 v2.2.0 h1:4pT439QV83L+G9FkcCriY6EkpcK6r6bK+A5FBUMI7qY= google.golang.org/api v0.4.0/go.mod h1:8k5glujaEP+g9n7WNsDg8QP6cUVNI86fCNMcbazEtwE= google.golang.org/api v0.7.0/go.mod h1:WtwebWUNSVBH/HAw79HIFXZNqEvBhG+Ra+ax0hx3E3M= google.golang.org/api v0.8.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg= @@ -1363,6 +1367,7 @@ k8s.io/client-go v0.24.1 h1:w1hNdI9PFrzu3OlovVeTnf4oHDt+FJLd9Ndluvnb42E= k8s.io/client-go v0.24.1/go.mod h1:f1kIDqcEYmwXS/vTbbhopMUbhKp2JhOeVTfxgaCIlF8= k8s.io/code-generator v0.24.1 h1:zS+dvmUNaOcvsQ4faV9hXNjsKG9/pQaLnts1Wma4RM8= k8s.io/code-generator v0.24.1/go.mod h1:dpVhs00hTuTdTY6jvVxvTFCk6gSMrtfRydbhZwHI15w= +k8s.io/component-base v0.24.1 h1:APv6W/YmfOWZfo+XJ1mZwep/f7g7Tpwvdbo9CQLDuts= k8s.io/component-base v0.24.1/go.mod h1:DW5vQGYVCog8WYpNob3PMmmsY8A3L9QZNg4j/dV3s38= k8s.io/gengo v0.0.0-20190128074634-0689ccc1d7d6/go.mod h1:ezvh/TsK7cY6rbqRK0oQQ8IAqLxYwwyPxAX1Pzy0ii0= k8s.io/gengo v0.0.0-20200413195148-3a45101e95ac/go.mod h1:ezvh/TsK7cY6rbqRK0oQQ8IAqLxYwwyPxAX1Pzy0ii0= @@ -1388,6 +1393,8 @@ rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8 rsc.io/quote/v3 v3.1.0/go.mod h1:yEA65RcK8LyAZtP9Kv3t0HmxON59tX3rD+tICJqUlj0= rsc.io/sampler v1.3.0/go.mod h1:T1hPZKmBbMNahiBKFy5HrXp6adAjACjK9JXDnKaTXpA= sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.0.30/go.mod h1:fEO7lRTdivWO2qYVCVG7dEADOMo/MLDCVr8So2g88Uw= +sigs.k8s.io/controller-runtime v0.11.1 h1:7YIHT2QnHJArj/dk9aUkYhfqfK5cIxPOX5gPECfdZLU= +sigs.k8s.io/controller-runtime v0.11.1/go.mod h1:KKwLiTooNGu+JmLZGn9Sl3Gjmfj66eMbCQznLP5zcqA= sigs.k8s.io/controller-tools v0.8.0 h1:uUkfTGEwrguqYYfcI2RRGUnC8mYdCFDqfwPKUcNJh1o= sigs.k8s.io/controller-tools v0.8.0/go.mod h1:qE2DXhVOiEq5ijmINcFbqi9GZrrUjzB1TuJU0xa6eoY= sigs.k8s.io/json v0.0.0-20211208200746-9f7c6b3444d2 h1:kDi4JBNAsJWfz1aEXhO8Jg87JJaPNLh5tIzYHgStQ9Y= diff --git a/pkg/controller/source/service/handler.go b/pkg/controller/source/service/handler.go index 35553842a..9d9147a7c 100644 --- a/pkg/controller/source/service/handler.go +++ b/pkg/controller/source/service/handler.go @@ -26,9 +26,6 @@ import ( api "k8s.io/api/core/v1" ) -// FakeTargetIP provides target for testing without load balancer -var FakeTargetIP *string - func GetTargets(logger logger.LogContext, obj resources.Object, names dns.DNSNameSet) (utils.StringSet, utils.StringSet, error) { svc := obj.Data().(*api.Service) if svc.Spec.Type != api.ServiceTypeLoadBalancer { @@ -47,8 +44,5 @@ func GetTargets(logger logger.LogContext, obj resources.Object, names dns.DNSNam } } } - if FakeTargetIP != nil { - set.Add(*FakeTargetIP) - } return set, nil, nil } diff --git a/test/functional/basics.go b/test/functional/basics.go index 9ea71a6f1..80c7560d0 100644 --- a/test/functional/basics.go +++ b/test/functional/basics.go @@ -161,10 +161,10 @@ spec: ` func init() { - addProviderTests(functestbasics) + addProviderTests(functestBasics) } -func functestbasics(cfg *config.Config, p *config.ProviderConfig) { +func functestBasics(cfg *config.Config, p *config.ProviderConfig) { _ = Describe("basics-"+p.Name, func() { It("should work with "+p.Name, func() { tmpl, err := template.New("Manifest").Parse(basicTemplate) @@ -173,7 +173,7 @@ func functestbasics(cfg *config.Config, p *config.ProviderConfig) { basePath, err := os.Getwd() Ω(err).Should(BeNil()) - err = p.CreateTempManifest(basePath, tmpl) + err = p.CreateTempManifest(basePath, "basics", tmpl) defer 
p.DeleteTempManifest() Ω(err).Should(BeNil()) diff --git a/test/functional/config/config.go b/test/functional/config/config.go index 7f446b886..1ceebc37a 100644 --- a/test/functional/config/config.go +++ b/test/functional/config/config.go @@ -77,23 +77,30 @@ func PrintConfigEnv() { } type ProviderConfig struct { - Name string `json:"name"` - Type string `json:"type"` - FinalizerType string `json:"finalizerType,omitempty"` - Domain string `json:"domain"` - ForeignDomain string `json:"foreignDomain,omitempty"` - SecretData string `json:"secretData"` - Prefix string `json:"prefix"` - AliasTarget string `json:"aliasTarget,omitempty"` - ZoneID string `json:"zoneID"` - PrivateDNS bool `json:"privateDNS,omitempty"` - TTL string `json:"ttl,omitempty"` - SpecProviderConfig string `json:"providerConfig,omitempty"` + Name string `json:"name"` + Type string `json:"type"` + FinalizerType string `json:"finalizerType,omitempty"` + Domain string `json:"domain"` + ForeignDomain string `json:"foreignDomain,omitempty"` + SecretData string `json:"secretData"` + Prefix string `json:"prefix"` + AliasTarget string `json:"aliasTarget,omitempty"` + ZoneID string `json:"zoneID"` + PrivateDNS bool `json:"privateDNS,omitempty"` + TTL string `json:"ttl,omitempty"` + SpecProviderConfig string `json:"providerConfig,omitempty"` + RoutingPolicySets map[string]map[string]RoutingPolicy `json:"routingPolicySets,omitempty"` Namespace string TmpManifestFilename string } +type RoutingPolicy struct { + Type string `json:"type"` + Parameters map[string]string `json:"parameters"` + Targets []string `json:"targets"` +} + type Config struct { Providers []*ProviderConfig `json:"providers"` @@ -188,9 +195,9 @@ func (p *ProviderConfig) TTLValue() int { return i } -func (p *ProviderConfig) CreateTempManifest(basePath string, manifestTemplate *template.Template) error { +func (p *ProviderConfig) CreateTempManifest(basePath, testName string, manifestTemplate *template.Template) error { p.TmpManifestFilename = "" - filename := fmt.Sprintf("%s/tmp-%s.yaml", basePath, p.Name) + filename := fmt.Sprintf("%s/tmp-%s-%s.yaml", basePath, p.Name, testName) f, err := os.Create(filename) if err != nil { return err diff --git a/test/functional/functest-config-template.yaml b/test/functional/functest-config-template.yaml index a67197715..c3110bad3 100644 --- a/test/functional/functest-config-template.yaml +++ b/test/functional/functest-config-template.yaml @@ -8,6 +8,23 @@ providers: AWS_ACCESS_KEY_ID: ... AWS_SECRET_ACCESS_KEY: .. zoneID: Z... +## for testing weighted routing policy uncomment +# routingPolicySets: +# weighted: +# id-a: +# type: weighted +# parameters: +# weight: "1" +# targets: +# - 1.1.1.1 +# - 1.1.1.2 +# id-b: +# type: weighted +# parameters: +# weight: "2" +# targets: +# - 2.2.2.1 +# - 2.2.2.2 - name: alicloud-dns-playground type: alicloud-dns diff --git a/test/functional/routingpolicies.go b/test/functional/routingpolicies.go new file mode 100644 index 000000000..18a9c1316 --- /dev/null +++ b/test/functional/routingpolicies.go @@ -0,0 +1,176 @@ +/* + * Copyright 2019 SAP SE or an SAP affiliate company. All rights reserved. This file is licensed under the Apache Software License, v. 2 except as noted otherwise in the LICENSE file + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * + */ + +package functional + +import ( + "fmt" + "os" + "text/template" + + "github.com/gardener/external-dns-management/test/functional/config" + . "github.com/onsi/ginkgo/v2" + . "github.com/onsi/gomega" + . "github.com/onsi/gomega/gstruct" +) + +var routingPolicyTemplate = ` +apiVersion: v1 +kind: Secret +metadata: + name: {{.Name}}-routingpolicies + namespace: {{.Namespace}} +type: Opaque +data: +{{.SecretData}} +--- +apiVersion: dns.gardener.cloud/v1alpha1 +kind: DNSProvider +metadata: + name: {{.Name}}-routingpolicies + namespace: {{.Namespace}} +spec: + type: {{.Type}} + secretRef: + name: {{.Name}}-routingpolicies +{{if .SpecProviderConfig}} + providerConfig: +{{.SpecProviderConfig}} +{{end}} + domains: + include: + - rp.{{.Domain}} +{{ range $k, $v := .RoutingPolicySets }} +{{ range $id, $policy := $v }} +--- +apiVersion: dns.gardener.cloud/v1alpha1 +kind: DNSEntry +metadata: + name: {{$.Prefix}}{{$k}}-{{$id}} + namespace: {{$.Namespace}} +spec: + dnsName: {{$.Prefix}}{{$k}}.rp.{{$.Domain}} + ttl: {{$.TTL}} + targets: +{{ range $j, $t := $policy.Targets }} + - {{$t}} +{{ end }} + routingPolicy: + type: {{$policy.Type}} + setIdentifier: {{$id}} + parameters: +{{ range $pk, $pv := $policy.Parameters }} + {{$pk}}: '{{$pv}}' +{{ end }} +{{ end }} +{{ end }} +` + +func init() { + addProviderTests(functestRoutingPolicies) +} + +func functestRoutingPolicies(cfg *config.Config, p *config.ProviderConfig) { + _ = Describe("routingpolicies-"+p.Name, func() { + It("should work with "+p.Name, func() { + if len(p.RoutingPolicySets) == 0 { + Skip("no routing policy sets defined") + } + tmpl, err := template.New("Manifest").Parse(routingPolicyTemplate) + Ω(err).Should(BeNil()) + + basePath, err := os.Getwd() + Ω(err).Should(BeNil()) + + err = p.CreateTempManifest(basePath, "routingpolicies", tmpl) + defer p.DeleteTempManifest() + Ω(err).Should(BeNil()) + + u := cfg.Utils + + err = u.AwaitKubectlGetCRDs("dnsproviders.dns.gardener.cloud", "dnsentries.dns.gardener.cloud") + Ω(err).Should(BeNil()) + + err = u.KubectlApply(p.TmpManifestFilename) + Ω(err).Should(BeNil()) + + providerName := p.Name + "-routingpolicies" + err = u.AwaitDNSProviderReady(providerName) + Ω(err).Should(BeNil()) + + entryNames := []string{} + for k, v := range p.RoutingPolicySets { + for id := range v { + name := entryName(p, fmt.Sprintf("%s-%s", k, id)) + entryNames = append(entryNames, name) + } + } + + err = u.AwaitDNSEntriesReady(entryNames...) 
+ Ω(err).Should(BeNil()) + + itemMap, err := u.KubectlGetAllDNSEntries() + Ω(err).Should(BeNil()) + + for k, v := range p.RoutingPolicySets { + for id, policy := range v { + params := map[string]interface{}{} + for k, v := range policy.Parameters { + params[k] = v + } + name := entryName(p, fmt.Sprintf("%s-%s", k, id)) + Ω(itemMap).Should(MatchKeys(IgnoreExtras, Keys{ + name: MatchKeys(IgnoreExtras, Keys{ + "metadata": MatchKeys(IgnoreExtras, Keys{ + "finalizers": And(HaveLen(1), ContainElement("dns.gardener.cloud/"+p.FinalizerType)), + }), + "spec": MatchKeys(IgnoreExtras, Keys{ + "dnsName": Equal(dnsNameRp(p, k)), + "targets": And(HaveLen(len(policy.Targets)), ContainElements(policy.Targets)), + }), + "status": MatchKeys(IgnoreExtras, Keys{ + "message": Equal("dns entry active"), + "provider": Equal(p.Namespace + "/" + providerName), + "providerType": Equal(p.Type), + "state": Equal("Ready"), + "targets": And(HaveLen(len(policy.Targets)), ContainElements(policy.Targets)), + "zone": Equal(p.ZoneID), + "routingPolicy": MatchAllKeys(Keys{ + "type": Equal(policy.Type), + "setIdentifier": Equal(id), + "parameters": Equal(params), + }), + }), + }), + })) + } + } + + err = u.KubectlDelete(p.TmpManifestFilename) + Ω(err).Should(BeNil()) + + err = u.AwaitDNSEntriesDeleted(entryNames...) + Ω(err).Should(BeNil()) + + err = u.AwaitDNSProviderDeleted(providerName) + Ω(err).Should(BeNil()) + }) + }) +} + +func dnsNameRp(p *config.ProviderConfig, name string) string { + return p.Prefix + name + ".rp." + p.Domain +} diff --git a/test/functional/run.sh b/test/functional/run.sh index faa5d0abc..db78393aa 100755 --- a/test/functional/run.sh +++ b/test/functional/run.sh @@ -36,12 +36,11 @@ Usage: Runs functional tests for external-dns-management for all provider using secrets from a functest-config.yaml file (see functest-config-template.yaml for details how it should look). -./run.sh [--no-dns] [-f ] [-r|--reuse] [-l] [-v] [-k|--keep] [--dns-server ] [--no-controller] [--dedicated] [-- ] +./run.sh [--no-dns] [-f ] [-r|--reuse] [-v] [-k|--keep] [--dns-server ] [--no-controller] [--dedicated] [-- ] Options: -r | --reuse reuse existing kind cluster -k | --keep keep kind cluster after run for reuse or inspection - -l use local kube-apiserver and etcd (i.e. no kind cluster) -v verbose output of script (not test itself) --dns-server dns server to use for DNS lookups (defaults to $DNS_SERVER) --no-dns do not perform DNS lookups (for faster testing) @@ -64,9 +63,6 @@ while [ "$1" != "" ]; do -v ) shift VERBOSE=true ;; - -l ) shift - LOCAL_APISERVER=true - ;; -k | --keep ) shift KEEP_CLUSTER=true ;; @@ -185,56 +181,8 @@ fi if [ "$LOCAL_APISERVER" != "" ]; then - echo using local kube-apiserver and etcd - - # download kube-apiserver, etcd, and kubectl executables from kubebuilder release - KUBEBUILDER_VERSION=2.3.2 - ARCH=$(go env GOARCH) - GOOS=$(go env GOOS) - KUBEBUILDER_BIN_DIR=$(realpath -m kubebuilder_${KUBEBUILDER_VERSION}_${GOOS}_${ARCH}/bin) - if [ ! -d $KUBEBUILDER_BIN_DIR ]; then - curl -Ls https://github.com/kubernetes-sigs/kubebuilder/releases/download/v${KUBEBUILDER_VERSION}/kubebuilder_${KUBEBUILDER_VERSION}_${GOOS}_${ARCH}.tar.gz | tar xz - fi - export PATH=$KUBEBUILDER_BIN_DIR:$PATH - mkdir -p $KUBEBUILDER_BIN_DIR/../var - - # starting etcd - echo Starting Etcd - rm -rf default.etcd - if [ "$VERBOSE" != "" ]; then - $KUBEBUILDER_BIN_DIR/etcd & - else - $KUBEBUILDER_BIN_DIR/etcd >/dev/null 2>&1 & - fi - PID_ETCD=$! 
- - # starting kube-apiserver - echo Starting Kube API Server - if [ "$VERBOSE" != "" ]; then - $KUBEBUILDER_BIN_DIR/kube-apiserver --etcd-servers http://localhost:2379 --cert-dir $KUBEBUILDER_BIN_DIR/../var & - else - $KUBEBUILDER_BIN_DIR/kube-apiserver --etcd-servers http://localhost:2379 --cert-dir $KUBEBUILDER_BIN_DIR/../var >/dev/null 2>&1 & - fi - PID_APISERVER=$! - sleep 3 - - # create local kubeconfig - cat > /tmp/kubeconfig-local.yaml << EOF -apiVersion: v1 -clusters: -- cluster: - server: http://localhost:8080 - name: local -contexts: -- context: - cluster: local - name: local-ctx -current-context: local-ctx -kind: Config -preferences: {} -users: [] -EOF - export KUBECONFIG=/tmp/kubeconfig-local.yaml + echo not supported + exit 1 else export KUBECONFIG=$INTEGRATION_KUBECONFIG fi diff --git a/test/integration/ingressAnnotation_test.go b/test/integration/ingressAnnotation_test.go new file mode 100644 index 000000000..67a36442a --- /dev/null +++ b/test/integration/ingressAnnotation_test.go @@ -0,0 +1,78 @@ +/* + * Copyright 2019 SAP SE or an SAP affiliate company. All rights reserved. This file is licensed under the Apache Software License, v. 2 except as noted otherwise in the LICENSE file + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * + */ + +package integration + +import ( + "github.com/gardener/external-dns-management/pkg/apis/dns/v1alpha1" + . "github.com/onsi/ginkgo/v2" + . "github.com/onsi/gomega" +) + +var _ = Describe("IngressAnnotation", func() { + It("creates DNS entry", func() { + pr, domain, _, err := testEnv.CreateSecretAndProvider("inmemory.mock", 0) + Ω(err).Should(BeNil()) + println(pr) + defer testEnv.DeleteProviderAndSecret(pr) + + fakeExternalIP := "1.2.3.4" + ingressDomain := "myingress." 
+ domain + ttl := 456 + ingress, err := testEnv.CreateIngressWithAnnotation("myingress", ingressDomain, fakeExternalIP, ttl, nil) + Ω(err).Should(BeNil()) + routingPolicy := `{"type": "weighted", "setIdentifier": "my-id", "parameters": {"weight": "10"}}` + ingress2, err := testEnv.CreateIngressWithAnnotation("mysvc2", ingressDomain, fakeExternalIP, ttl, &routingPolicy) + Ω(err).Should(BeNil()) + + entryObj, err := testEnv.AwaitObjectByOwner("Ingress", ingress.GetName()) + Ω(err).Should(BeNil()) + + checkEntry(entryObj, pr) + entryObj, err = testEnv.GetEntry(entryObj.GetName()) + Ω(err).Should(BeNil()) + entry := UnwrapEntry(entryObj) + Ω(entry.Spec.DNSName).Should(Equal(ingressDomain)) + Ω(entry.Spec.Targets).Should(ConsistOf(fakeExternalIP)) + Ω(entry.Spec.TTL).ShouldNot(BeNil()) + Ω(*entry.Spec.TTL).Should(Equal(int64(ttl))) + + entryObj2, err := testEnv.AwaitObjectByOwner("Ingress", ingress2.GetName()) + entry2 := UnwrapEntry(entryObj2) + Ω(err).Should(BeNil()) + Ω(entry2.Spec.RoutingPolicy).ShouldNot(BeNil()) + Ω(*entry2.Spec.RoutingPolicy).Should(Equal(v1alpha1.RoutingPolicy{ + Type: "weighted", + SetIdentifier: "my-id", + Parameters: map[string]string{"weight": "10"}, + })) + + err = ingress.Delete() + Ω(err).Should(BeNil()) + err = ingress2.Delete() + Ω(err).Should(BeNil()) + + err = testEnv.AwaitIngressDeletion(ingress.GetName()) + Ω(err).Should(BeNil()) + err = testEnv.AwaitIngressDeletion(ingress2.GetName()) + Ω(err).Should(BeNil()) + + err = testEnv.AwaitEntryDeletion(entryObj.GetName()) + Ω(err).Should(BeNil()) + err = testEnv.AwaitEntryDeletion(entryObj2.GetName()) + Ω(err).Should(BeNil()) + }) +}) diff --git a/test/integration/run.sh b/test/integration/run.sh index da98257c7..2d25a3961 100755 --- a/test/integration/run.sh +++ b/test/integration/run.sh @@ -92,64 +92,22 @@ fi cd $ROOTDIR/test/integration if [ "$LOCAL_APISERVER" != "" ]; then - echo using local kube-apiserver and etcd - - # download kube-apiserver, etcd, and kubectl executables from kubebuilder release - KUBEBUILDER_VERSION=2.3.2 - ARCH=$(go env GOARCH) - GOOS=$(go env GOOS) - KUBEBUILDER_BIN_DIR=$(realpath -m kubebuilder_${KUBEBUILDER_VERSION}_${GOOS}_${ARCH}/bin) - if [ ! -d $KUBEBUILDER_BIN_DIR ]; then - curl -Ls https://github.com/kubernetes-sigs/kubebuilder/releases/download/v${KUBEBUILDER_VERSION}/kubebuilder_${KUBEBUILDER_VERSION}_${GOOS}_${ARCH}.tar.gz | tar xz + unset USE_EXISTING_CLUSTER + echo using controller runtime envtest + + K8S_VERSION=1.24.2 + KUBEBUILDER_DIR=$(realpath -m kubebuilder_${K8S_VERSION}) + if [ ! -d "$KUBEBUILDER_DIR" ]; then + curl -sSL "https://go.kubebuilder.io/test-tools/${K8S_VERSION}/$(go env GOOS)/$(go env GOARCH)" | tar -xvz + mv kubebuilder "$KUBEBUILDER_DIR" fi - export PATH=$KUBEBUILDER_BIN_DIR:$PATH - mkdir -p $KUBEBUILDER_BIN_DIR/../var - - # starting etcd - echo Starting Etcd - rm -rf default.etcd - if [ "$VERBOSE" != "" ]; then - $KUBEBUILDER_BIN_DIR/etcd & - else - $KUBEBUILDER_BIN_DIR/etcd >/dev/null 2>&1 & - fi - PID_ETCD=$! - trap "kill $PID_ETCD" SIGINT SIGTERM EXIT - - # starting kube-apiserver - echo Starting Kube API Server - if [ "$VERBOSE" != "" ]; then - $KUBEBUILDER_BIN_DIR/kube-apiserver --etcd-servers http://localhost:2379 --cert-dir $KUBEBUILDER_BIN_DIR/../var & - else - $KUBEBUILDER_BIN_DIR/kube-apiserver --etcd-servers http://localhost:2379 --cert-dir $KUBEBUILDER_BIN_DIR/../var >/dev/null 2>&1 & - fi - PID_APISERVER=$! 
- trap "kill $PID_APISERVER && kill $PID_ETCD" SIGINT SIGTERM EXIT - sleep 3 - - # create local kubeconfig - cat > /tmp/kubeconfig-local.yaml << EOF -apiVersion: v1 -clusters: -- cluster: - server: http://localhost:8080 - name: local -contexts: -- context: - cluster: local - name: local-ctx -current-context: local-ctx -kind: Config -preferences: {} -users: [] -EOF - export KUBECONFIG=/tmp/kubeconfig-local.yaml + export KUBEBUILDER_ASSETS="${KUBEBUILDER_DIR}/bin" else + export USE_EXISTING_CLUSTER=true export KUBECONFIG=$INTEGRATION_KUBECONFIG + kubectl cluster-info fi -kubectl cluster-info - # install ginkgo go install -mod=vendor github.com/onsi/ginkgo/v2/ginkgo diff --git a/test/integration/serviceAnnotation_test.go b/test/integration/serviceAnnotation_test.go index 6eddf6b7b..91ea2c234 100644 --- a/test/integration/serviceAnnotation_test.go +++ b/test/integration/serviceAnnotation_test.go @@ -17,7 +17,7 @@ package integration import ( - "github.com/gardener/controller-manager-library/pkg/resources" + "github.com/gardener/external-dns-management/pkg/apis/dns/v1alpha1" . "github.com/onsi/ginkgo/v2" . "github.com/onsi/gomega" ) @@ -32,22 +32,16 @@ var _ = Describe("ServiceAnnotation", func() { fakeExternalIP := "1.2.3.4" svcDomain := "mysvc." + domain ttl := 456 - svc, err := testEnv.CreateServiceWithAnnotation("mysvc", svcDomain, fakeExternalIP, ttl) + svc, err := testEnv.CreateServiceWithAnnotation("mysvc", svcDomain, fakeExternalIP, ttl, nil) + Ω(err).Should(BeNil()) + routingPolicy := `{"type": "weighted", "setIdentifier": "my-id", "parameters": {"weight": "10"}}` + svc2, err := testEnv.CreateServiceWithAnnotation("mysvc2", svcDomain, fakeExternalIP, ttl, &routingPolicy) Ω(err).Should(BeNil()) - var entryObj resources.Object - err = testEnv.Await("Generated entry for service not found", func() (bool, error) { - var err error - entryObj, err = testEnv.FindEntryByOwner("Service", svc.GetName()) - if entryObj != nil { - return true, nil - } - return false, err - }) + entryObj, err := testEnv.AwaitObjectByOwner("Service", svc.GetName()) Ω(err).Should(BeNil()) checkEntry(entryObj, pr) - entryObj, err = testEnv.GetEntry(entryObj.GetName()) Ω(err).Should(BeNil()) entry := UnwrapEntry(entryObj) @@ -56,13 +50,29 @@ var _ = Describe("ServiceAnnotation", func() { Ω(entry.Spec.TTL).ShouldNot(BeNil()) Ω(*entry.Spec.TTL).Should(Equal(int64(ttl))) + entryObj2, err := testEnv.AwaitObjectByOwner("Service", svc2.GetName()) + entry2 := UnwrapEntry(entryObj2) + Ω(err).Should(BeNil()) + Ω(entry2.Spec.RoutingPolicy).ShouldNot(BeNil()) + Ω(*entry2.Spec.RoutingPolicy).Should(Equal(v1alpha1.RoutingPolicy{ + Type: "weighted", + SetIdentifier: "my-id", + Parameters: map[string]string{"weight": "10"}, + })) + err = svc.Delete() Ω(err).Should(BeNil()) + err = svc2.Delete() + Ω(err).Should(BeNil()) err = testEnv.AwaitServiceDeletion(svc.GetName()) Ω(err).Should(BeNil()) + err = testEnv.AwaitServiceDeletion(svc2.GetName()) + Ω(err).Should(BeNil()) err = testEnv.AwaitEntryDeletion(entryObj.GetName()) Ω(err).Should(BeNil()) + err = testEnv.AwaitEntryDeletion(entryObj2.GetName()) + Ω(err).Should(BeNil()) }) }) diff --git a/test/integration/suite_test.go b/test/integration/suite_test.go index 4a19cbad8..53904143c 100644 --- a/test/integration/suite_test.go +++ b/test/integration/suite_test.go @@ -17,6 +17,9 @@ package integration import ( + "encoding/base64" + "fmt" + "io/ioutil" "os" "testing" @@ -24,6 +27,9 @@ import ( . "github.com/onsi/ginkgo/v2" . 
"github.com/onsi/gomega" networkingv1 "k8s.io/api/networking/v1" + "k8s.io/client-go/rest" + + "sigs.k8s.io/controller-runtime/pkg/envtest" _ "github.com/gardener/external-dns-management/pkg/controller/provider/compound/controller" _ "github.com/gardener/external-dns-management/pkg/controller/provider/mock" @@ -35,6 +41,7 @@ import ( _ "k8s.io/client-go/plugin/pkg/client/auth/oidc" ) +var controllerRuntimeTestEnv *envtest.Environment var testEnv *TestEnv var testEnv2 *TestEnv var testCerts *certFileAndSecret @@ -50,17 +57,22 @@ func TestIntegration(t *testing.T) { var _ = BeforeSuite(func() { var err error - kubeconfig := os.Getenv("KUBECONFIG") - Ω(kubeconfig).ShouldNot(Equal("")) + controllerRuntimeTestEnv = &envtest.Environment{} + restConfig, err := controllerRuntimeTestEnv.Start() + Expect(err).ToNot(HaveOccurred()) + Expect(restConfig).ToNot(BeNil()) + + kubeconfigFile := createKubeconfigFile(restConfig) + os.Setenv("KUBECONFIG", kubeconfigFile) - testEnv, err = NewTestEnv(kubeconfig, "test") + testEnv, err = NewTestEnv(kubeconfigFile, "test") Ω(err).Should(BeNil()) testCerts, err = newCertFileAndSecret(testEnv) Ω(err).Should(BeNil()) args := []string{ - "--kubeconfig", kubeconfig, + "--kubeconfig", kubeconfigFile, "--identifier", "integrationtest", "--controllers", "dnscontrollers,dnssources", "--remote-access-port", "50051", @@ -81,6 +93,9 @@ var _ = BeforeSuite(func() { }) var _ = AfterSuite(func() { + if controllerRuntimeTestEnv != nil { + _ = controllerRuntimeTestEnv.Stop() + } if testCerts != nil { testCerts.cleanup() } @@ -88,3 +103,33 @@ var _ = AfterSuite(func() { testEnv.Infof("AfterSuite") } }) + +func createKubeconfigFile(cfg *rest.Config) string { + template := `apiVersion: v1 +kind: Config +clusters: + - name: testenv + cluster: + server: '%s' + certificate-authority-data: %s +contexts: + - name: testenv + context: + cluster: testenv + user: testuser +current-context: testenv +users: + - name: testuser + user: + client-certificate-data: %s + client-key-data: %s` + + tmpfile, err := ioutil.TempFile("", "kubeconfig-integration-suite-test") + Expect(err).NotTo(HaveOccurred()) + _, err = fmt.Fprintf(tmpfile, template, cfg.Host, base64.StdEncoding.EncodeToString(cfg.CAData), + base64.StdEncoding.EncodeToString(cfg.CertData), base64.StdEncoding.EncodeToString(cfg.KeyData)) + Expect(err).NotTo(HaveOccurred()) + err = tmpfile.Close() + Expect(err).NotTo(HaveOccurred()) + return tmpfile.Name() +} diff --git a/test/integration/testenv.go b/test/integration/testenv.go index 51822baac..c35a05bfc 100644 --- a/test/integration/testenv.go +++ b/test/integration/testenv.go @@ -45,7 +45,6 @@ import ( v1alpha1 "github.com/gardener/external-dns-management/pkg/apis/dns/v1alpha1" "github.com/gardener/external-dns-management/pkg/controller/provider/mock" - "github.com/gardener/external-dns-management/pkg/controller/source/service" "github.com/gardener/external-dns-management/pkg/dns" dnsprovider "github.com/gardener/external-dns-management/pkg/dns/provider" @@ -515,10 +514,18 @@ func UnwrapOwner(obj resources.Object) *v1alpha1.DNSOwner { return obj.Data().(*v1alpha1.DNSOwner) } -func (te *TestEnv) CreateIngressWithAnnotation(name, domainName string) (resources.Object, error) { +func (te *TestEnv) CreateIngressWithAnnotation(name, domainName, fakeExternalIP string, ttl int, routingPolicy *string) (resources.Object, error) { setter := func(e *networking.Ingress) { - e.Annotations = map[string]string{"dns.gardener.cloud/dnsnames": domainName} - e.Spec.Rules = []networking.IngressRule{{Host: 
domainName}} + e.Annotations = map[string]string{dnssource.DNS_ANNOTATION: "*", dnssource.TTL_ANNOTATION: fmt.Sprintf("%d", ttl)} + if routingPolicy != nil { + e.Annotations[dnssource.ROUTING_POLICY_ANNOTATION] = *routingPolicy + } + e.Spec.Rules = []networking.IngressRule{ + { + Host: domainName, + IngressRuleValue: networking.IngressRuleValue{}, + }, + } } ingress := &networking.Ingress{} @@ -534,6 +541,24 @@ func (te *TestEnv) CreateIngressWithAnnotation(name, domainName string) (resourc err = obj.Update() } } + if err != nil { + return obj, err + } + + if fakeExternalIP != "" { + res, err := te.resources.Get(ingress) + if err != nil { + return obj, err + } + _, _, err = res.ModifyStatus(ingress, func(data resources.ObjectData) (bool, error) { + o := data.(*networking.Ingress) + o.Status.LoadBalancer.Ingress = []corev1.LoadBalancerIngress{ + {IP: fakeExternalIP}, + } + return true, nil + }) + } + return obj, err } @@ -549,15 +574,16 @@ func (te *TestEnv) GetIngress(name string) (resources.Object, *networking.Ingres return obj, obj.Data().(*networking.Ingress), nil } -func (te *TestEnv) CreateServiceWithAnnotation(name, domainName, fakeExternalIP string, ttl int) (resources.Object, error) { +func (te *TestEnv) CreateServiceWithAnnotation(name, domainName, fakeExternalIP string, ttl int, routingPolicy *string) (resources.Object, error) { setter := func(e *corev1.Service) { - e.Annotations = map[string]string{"dns.gardener.cloud/dnsnames": domainName, "dns.gardener.cloud/ttl": fmt.Sprintf("%d", ttl)} + e.Annotations = map[string]string{dnssource.DNS_ANNOTATION: domainName, dnssource.TTL_ANNOTATION: fmt.Sprintf("%d", ttl)} + if routingPolicy != nil { + e.Annotations[dnssource.ROUTING_POLICY_ANNOTATION] = *routingPolicy + } e.Spec.Type = corev1.ServiceTypeLoadBalancer e.Spec.Ports = []corev1.ServicePort{{Name: "http", Port: 80, TargetPort: intstr.FromInt(8080), Protocol: corev1.ProtocolTCP}} } - ip := "1.2.3.4" - service.FakeTargetIP = &ip svc := &corev1.Service{} svc.SetName(name) svc.SetNamespace(te.Namespace) @@ -571,6 +597,24 @@ func (te *TestEnv) CreateServiceWithAnnotation(name, domainName, fakeExternalIP err = obj.Update() } } + if err != nil { + return obj, err + } + + if fakeExternalIP != "" { + res, err := te.resources.Get(svc) + if err != nil { + return obj, err + } + _, _, err = res.ModifyStatus(svc, func(data resources.ObjectData) (bool, error) { + o := data.(*corev1.Service) + o.Status.LoadBalancer.Ingress = []corev1.LoadBalancerIngress{ + {IP: fakeExternalIP}, + } + return true, nil + }) + } + return obj, err } @@ -754,6 +798,30 @@ func (te *TestEnv) AwaitServiceDeletion(name string) error { }) } +func (te *TestEnv) AwaitIngressDeletion(name string) error { + msg := fmt.Sprintf("Ingress %s still existing", name) + return te.Await(msg, func() (bool, error) { + _, _, err := te.GetIngress(name) + if errors.IsNotFound(err) { + return true, nil + } + return false, err + }) +} + +func (te *TestEnv) AwaitObjectByOwner(kind, name string) (resources.Object, error) { + var entryObj resources.Object + err := te.Await("Generated entry for service not found", func() (bool, error) { + var err error + entryObj, err = te.FindEntryByOwner(kind, name) + if entryObj != nil { + return true, nil + } + return false, err + }) + return entryObj, err +} + func (te *TestEnv) DeleteSecretByName(name string) error { secret := &corev1.Secret{} secret.SetName(name) diff --git a/vendor/github.com/gardener/controller-manager-library/hack/run-in.sh 
b/vendor/github.com/gardener/controller-manager-library/hack/run-in.sh old mode 100755 new mode 100644 diff --git a/vendor/k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset/clientset.go b/vendor/k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset/clientset.go new file mode 100644 index 000000000..65ae76067 --- /dev/null +++ b/vendor/k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset/clientset.go @@ -0,0 +1,134 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +package clientset + +import ( + "fmt" + "net/http" + + apiextensionsv1 "k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset/typed/apiextensions/v1" + apiextensionsv1beta1 "k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset/typed/apiextensions/v1beta1" + discovery "k8s.io/client-go/discovery" + rest "k8s.io/client-go/rest" + flowcontrol "k8s.io/client-go/util/flowcontrol" +) + +type Interface interface { + Discovery() discovery.DiscoveryInterface + ApiextensionsV1beta1() apiextensionsv1beta1.ApiextensionsV1beta1Interface + ApiextensionsV1() apiextensionsv1.ApiextensionsV1Interface +} + +// Clientset contains the clients for groups. Each group has exactly one +// version included in a Clientset. +type Clientset struct { + *discovery.DiscoveryClient + apiextensionsV1beta1 *apiextensionsv1beta1.ApiextensionsV1beta1Client + apiextensionsV1 *apiextensionsv1.ApiextensionsV1Client +} + +// ApiextensionsV1beta1 retrieves the ApiextensionsV1beta1Client +func (c *Clientset) ApiextensionsV1beta1() apiextensionsv1beta1.ApiextensionsV1beta1Interface { + return c.apiextensionsV1beta1 +} + +// ApiextensionsV1 retrieves the ApiextensionsV1Client +func (c *Clientset) ApiextensionsV1() apiextensionsv1.ApiextensionsV1Interface { + return c.apiextensionsV1 +} + +// Discovery retrieves the DiscoveryClient +func (c *Clientset) Discovery() discovery.DiscoveryInterface { + if c == nil { + return nil + } + return c.DiscoveryClient +} + +// NewForConfig creates a new Clientset for the given config. +// If config's RateLimiter is not set and QPS and Burst are acceptable, +// NewForConfig will generate a rate-limiter in configShallowCopy. +// NewForConfig is equivalent to NewForConfigAndClient(c, httpClient), +// where httpClient was generated with rest.HTTPClientFor(c). +func NewForConfig(c *rest.Config) (*Clientset, error) { + configShallowCopy := *c + + if configShallowCopy.UserAgent == "" { + configShallowCopy.UserAgent = rest.DefaultKubernetesUserAgent() + } + + // share the transport between all clients + httpClient, err := rest.HTTPClientFor(&configShallowCopy) + if err != nil { + return nil, err + } + + return NewForConfigAndClient(&configShallowCopy, httpClient) +} + +// NewForConfigAndClient creates a new Clientset for the given config and http client. +// Note the http client provided takes precedence over the configured transport values. 
+// If config's RateLimiter is not set and QPS and Burst are acceptable, +// NewForConfigAndClient will generate a rate-limiter in configShallowCopy. +func NewForConfigAndClient(c *rest.Config, httpClient *http.Client) (*Clientset, error) { + configShallowCopy := *c + if configShallowCopy.RateLimiter == nil && configShallowCopy.QPS > 0 { + if configShallowCopy.Burst <= 0 { + return nil, fmt.Errorf("burst is required to be greater than 0 when RateLimiter is not set and QPS is set to greater than 0") + } + configShallowCopy.RateLimiter = flowcontrol.NewTokenBucketRateLimiter(configShallowCopy.QPS, configShallowCopy.Burst) + } + + var cs Clientset + var err error + cs.apiextensionsV1beta1, err = apiextensionsv1beta1.NewForConfigAndClient(&configShallowCopy, httpClient) + if err != nil { + return nil, err + } + cs.apiextensionsV1, err = apiextensionsv1.NewForConfigAndClient(&configShallowCopy, httpClient) + if err != nil { + return nil, err + } + + cs.DiscoveryClient, err = discovery.NewDiscoveryClientForConfigAndClient(&configShallowCopy, httpClient) + if err != nil { + return nil, err + } + return &cs, nil +} + +// NewForConfigOrDie creates a new Clientset for the given config and +// panics if there is an error in the config. +func NewForConfigOrDie(c *rest.Config) *Clientset { + cs, err := NewForConfig(c) + if err != nil { + panic(err) + } + return cs +} + +// New creates a new Clientset for the given RESTClient. +func New(c rest.Interface) *Clientset { + var cs Clientset + cs.apiextensionsV1beta1 = apiextensionsv1beta1.New(c) + cs.apiextensionsV1 = apiextensionsv1.New(c) + + cs.DiscoveryClient = discovery.NewDiscoveryClient(c) + return &cs +} diff --git a/vendor/k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset/doc.go b/vendor/k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset/doc.go new file mode 100644 index 000000000..ee865e56d --- /dev/null +++ b/vendor/k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset/doc.go @@ -0,0 +1,20 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +// This package has the automatically generated clientset. +package clientset diff --git a/vendor/k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset/scheme/doc.go b/vendor/k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset/scheme/doc.go new file mode 100644 index 000000000..7dc375616 --- /dev/null +++ b/vendor/k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset/scheme/doc.go @@ -0,0 +1,20 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +// This package contains the scheme of the automatically generated clientset. +package scheme diff --git a/vendor/k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset/scheme/register.go b/vendor/k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset/scheme/register.go new file mode 100644 index 000000000..144c20666 --- /dev/null +++ b/vendor/k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset/scheme/register.go @@ -0,0 +1,58 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +package scheme + +import ( + apiextensionsv1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1" + apiextensionsv1beta1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + runtime "k8s.io/apimachinery/pkg/runtime" + schema "k8s.io/apimachinery/pkg/runtime/schema" + serializer "k8s.io/apimachinery/pkg/runtime/serializer" + utilruntime "k8s.io/apimachinery/pkg/util/runtime" +) + +var Scheme = runtime.NewScheme() +var Codecs = serializer.NewCodecFactory(Scheme) +var ParameterCodec = runtime.NewParameterCodec(Scheme) +var localSchemeBuilder = runtime.SchemeBuilder{ + apiextensionsv1beta1.AddToScheme, + apiextensionsv1.AddToScheme, +} + +// AddToScheme adds all types of this clientset into the given scheme. This allows composition +// of clientsets, like in: +// +// import ( +// "k8s.io/client-go/kubernetes" +// clientsetscheme "k8s.io/client-go/kubernetes/scheme" +// aggregatorclientsetscheme "k8s.io/kube-aggregator/pkg/client/clientset_generated/clientset/scheme" +// ) +// +// kclientset, _ := kubernetes.NewForConfig(c) +// _ = aggregatorclientsetscheme.AddToScheme(clientsetscheme.Scheme) +// +// After this, RawExtensions in Kubernetes types will serialize kube-aggregator types +// correctly. +var AddToScheme = localSchemeBuilder.AddToScheme + +func init() { + v1.AddToGroupVersion(Scheme, schema.GroupVersion{Version: "v1"}) + utilruntime.Must(AddToScheme(Scheme)) +} diff --git a/vendor/k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset/typed/apiextensions/v1/apiextensions_client.go b/vendor/k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset/typed/apiextensions/v1/apiextensions_client.go new file mode 100644 index 000000000..0bdc44c40 --- /dev/null +++ b/vendor/k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset/typed/apiextensions/v1/apiextensions_client.go @@ -0,0 +1,107 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +package v1 + +import ( + "net/http" + + v1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1" + "k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset/scheme" + rest "k8s.io/client-go/rest" +) + +type ApiextensionsV1Interface interface { + RESTClient() rest.Interface + CustomResourceDefinitionsGetter +} + +// ApiextensionsV1Client is used to interact with features provided by the apiextensions.k8s.io group. +type ApiextensionsV1Client struct { + restClient rest.Interface +} + +func (c *ApiextensionsV1Client) CustomResourceDefinitions() CustomResourceDefinitionInterface { + return newCustomResourceDefinitions(c) +} + +// NewForConfig creates a new ApiextensionsV1Client for the given config. +// NewForConfig is equivalent to NewForConfigAndClient(c, httpClient), +// where httpClient was generated with rest.HTTPClientFor(c). +func NewForConfig(c *rest.Config) (*ApiextensionsV1Client, error) { + config := *c + if err := setConfigDefaults(&config); err != nil { + return nil, err + } + httpClient, err := rest.HTTPClientFor(&config) + if err != nil { + return nil, err + } + return NewForConfigAndClient(&config, httpClient) +} + +// NewForConfigAndClient creates a new ApiextensionsV1Client for the given config and http client. +// Note the http client provided takes precedence over the configured transport values. +func NewForConfigAndClient(c *rest.Config, h *http.Client) (*ApiextensionsV1Client, error) { + config := *c + if err := setConfigDefaults(&config); err != nil { + return nil, err + } + client, err := rest.RESTClientForConfigAndClient(&config, h) + if err != nil { + return nil, err + } + return &ApiextensionsV1Client{client}, nil +} + +// NewForConfigOrDie creates a new ApiextensionsV1Client for the given config and +// panics if there is an error in the config. +func NewForConfigOrDie(c *rest.Config) *ApiextensionsV1Client { + client, err := NewForConfig(c) + if err != nil { + panic(err) + } + return client +} + +// New creates a new ApiextensionsV1Client for the given RESTClient. +func New(c rest.Interface) *ApiextensionsV1Client { + return &ApiextensionsV1Client{c} +} + +func setConfigDefaults(config *rest.Config) error { + gv := v1.SchemeGroupVersion + config.GroupVersion = &gv + config.APIPath = "/apis" + config.NegotiatedSerializer = scheme.Codecs.WithoutConversion() + + if config.UserAgent == "" { + config.UserAgent = rest.DefaultKubernetesUserAgent() + } + + return nil +} + +// RESTClient returns a RESTClient that is used to communicate +// with API server by this client implementation. 
+func (c *ApiextensionsV1Client) RESTClient() rest.Interface { + if c == nil { + return nil + } + return c.restClient +} diff --git a/vendor/k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset/typed/apiextensions/v1/customresourcedefinition.go b/vendor/k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset/typed/apiextensions/v1/customresourcedefinition.go new file mode 100644 index 000000000..5569b12d9 --- /dev/null +++ b/vendor/k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset/typed/apiextensions/v1/customresourcedefinition.go @@ -0,0 +1,184 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +package v1 + +import ( + "context" + "time" + + v1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1" + scheme "k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset/scheme" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + rest "k8s.io/client-go/rest" +) + +// CustomResourceDefinitionsGetter has a method to return a CustomResourceDefinitionInterface. +// A group's client should implement this interface. +type CustomResourceDefinitionsGetter interface { + CustomResourceDefinitions() CustomResourceDefinitionInterface +} + +// CustomResourceDefinitionInterface has methods to work with CustomResourceDefinition resources. 
+type CustomResourceDefinitionInterface interface { + Create(ctx context.Context, customResourceDefinition *v1.CustomResourceDefinition, opts metav1.CreateOptions) (*v1.CustomResourceDefinition, error) + Update(ctx context.Context, customResourceDefinition *v1.CustomResourceDefinition, opts metav1.UpdateOptions) (*v1.CustomResourceDefinition, error) + UpdateStatus(ctx context.Context, customResourceDefinition *v1.CustomResourceDefinition, opts metav1.UpdateOptions) (*v1.CustomResourceDefinition, error) + Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error + DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error + Get(ctx context.Context, name string, opts metav1.GetOptions) (*v1.CustomResourceDefinition, error) + List(ctx context.Context, opts metav1.ListOptions) (*v1.CustomResourceDefinitionList, error) + Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) + Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.CustomResourceDefinition, err error) + CustomResourceDefinitionExpansion +} + +// customResourceDefinitions implements CustomResourceDefinitionInterface +type customResourceDefinitions struct { + client rest.Interface +} + +// newCustomResourceDefinitions returns a CustomResourceDefinitions +func newCustomResourceDefinitions(c *ApiextensionsV1Client) *customResourceDefinitions { + return &customResourceDefinitions{ + client: c.RESTClient(), + } +} + +// Get takes name of the customResourceDefinition, and returns the corresponding customResourceDefinition object, and an error if there is any. +func (c *customResourceDefinitions) Get(ctx context.Context, name string, options metav1.GetOptions) (result *v1.CustomResourceDefinition, err error) { + result = &v1.CustomResourceDefinition{} + err = c.client.Get(). + Resource("customresourcedefinitions"). + Name(name). + VersionedParams(&options, scheme.ParameterCodec). + Do(ctx). + Into(result) + return +} + +// List takes label and field selectors, and returns the list of CustomResourceDefinitions that match those selectors. +func (c *customResourceDefinitions) List(ctx context.Context, opts metav1.ListOptions) (result *v1.CustomResourceDefinitionList, err error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } + result = &v1.CustomResourceDefinitionList{} + err = c.client.Get(). + Resource("customresourcedefinitions"). + VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). + Do(ctx). + Into(result) + return +} + +// Watch returns a watch.Interface that watches the requested customResourceDefinitions. +func (c *customResourceDefinitions) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } + opts.Watch = true + return c.client.Get(). + Resource("customresourcedefinitions"). + VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). + Watch(ctx) +} + +// Create takes the representation of a customResourceDefinition and creates it. Returns the server's representation of the customResourceDefinition, and an error, if there is any. 
+func (c *customResourceDefinitions) Create(ctx context.Context, customResourceDefinition *v1.CustomResourceDefinition, opts metav1.CreateOptions) (result *v1.CustomResourceDefinition, err error) { + result = &v1.CustomResourceDefinition{} + err = c.client.Post(). + Resource("customresourcedefinitions"). + VersionedParams(&opts, scheme.ParameterCodec). + Body(customResourceDefinition). + Do(ctx). + Into(result) + return +} + +// Update takes the representation of a customResourceDefinition and updates it. Returns the server's representation of the customResourceDefinition, and an error, if there is any. +func (c *customResourceDefinitions) Update(ctx context.Context, customResourceDefinition *v1.CustomResourceDefinition, opts metav1.UpdateOptions) (result *v1.CustomResourceDefinition, err error) { + result = &v1.CustomResourceDefinition{} + err = c.client.Put(). + Resource("customresourcedefinitions"). + Name(customResourceDefinition.Name). + VersionedParams(&opts, scheme.ParameterCodec). + Body(customResourceDefinition). + Do(ctx). + Into(result) + return +} + +// UpdateStatus was generated because the type contains a Status member. +// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). +func (c *customResourceDefinitions) UpdateStatus(ctx context.Context, customResourceDefinition *v1.CustomResourceDefinition, opts metav1.UpdateOptions) (result *v1.CustomResourceDefinition, err error) { + result = &v1.CustomResourceDefinition{} + err = c.client.Put(). + Resource("customresourcedefinitions"). + Name(customResourceDefinition.Name). + SubResource("status"). + VersionedParams(&opts, scheme.ParameterCodec). + Body(customResourceDefinition). + Do(ctx). + Into(result) + return +} + +// Delete takes name of the customResourceDefinition and deletes it. Returns an error if one occurs. +func (c *customResourceDefinitions) Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error { + return c.client.Delete(). + Resource("customresourcedefinitions"). + Name(name). + Body(&opts). + Do(ctx). + Error() +} + +// DeleteCollection deletes a collection of objects. +func (c *customResourceDefinitions) DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error { + var timeout time.Duration + if listOpts.TimeoutSeconds != nil { + timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second + } + return c.client.Delete(). + Resource("customresourcedefinitions"). + VersionedParams(&listOpts, scheme.ParameterCodec). + Timeout(timeout). + Body(&opts). + Do(ctx). + Error() +} + +// Patch applies the patch and returns the patched customResourceDefinition. +func (c *customResourceDefinitions) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.CustomResourceDefinition, err error) { + result = &v1.CustomResourceDefinition{} + err = c.client.Patch(pt). + Resource("customresourcedefinitions"). + Name(name). + SubResource(subresources...). + VersionedParams(&opts, scheme.ParameterCodec). + Body(data). + Do(ctx). 
+ Into(result) + return +} diff --git a/vendor/k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset/typed/apiextensions/v1/doc.go b/vendor/k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset/typed/apiextensions/v1/doc.go new file mode 100644 index 000000000..3af5d054f --- /dev/null +++ b/vendor/k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset/typed/apiextensions/v1/doc.go @@ -0,0 +1,20 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +// This package has the automatically generated typed clients. +package v1 diff --git a/vendor/k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset/typed/apiextensions/v1/generated_expansion.go b/vendor/k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset/typed/apiextensions/v1/generated_expansion.go new file mode 100644 index 000000000..e594636af --- /dev/null +++ b/vendor/k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset/typed/apiextensions/v1/generated_expansion.go @@ -0,0 +1,21 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +package v1 + +type CustomResourceDefinitionExpansion interface{} diff --git a/vendor/k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset/typed/apiextensions/v1beta1/apiextensions_client.go b/vendor/k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset/typed/apiextensions/v1beta1/apiextensions_client.go new file mode 100644 index 000000000..657ce2ca8 --- /dev/null +++ b/vendor/k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset/typed/apiextensions/v1beta1/apiextensions_client.go @@ -0,0 +1,107 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. 
+ +package v1beta1 + +import ( + "net/http" + + v1beta1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1" + "k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset/scheme" + rest "k8s.io/client-go/rest" +) + +type ApiextensionsV1beta1Interface interface { + RESTClient() rest.Interface + CustomResourceDefinitionsGetter +} + +// ApiextensionsV1beta1Client is used to interact with features provided by the apiextensions.k8s.io group. +type ApiextensionsV1beta1Client struct { + restClient rest.Interface +} + +func (c *ApiextensionsV1beta1Client) CustomResourceDefinitions() CustomResourceDefinitionInterface { + return newCustomResourceDefinitions(c) +} + +// NewForConfig creates a new ApiextensionsV1beta1Client for the given config. +// NewForConfig is equivalent to NewForConfigAndClient(c, httpClient), +// where httpClient was generated with rest.HTTPClientFor(c). +func NewForConfig(c *rest.Config) (*ApiextensionsV1beta1Client, error) { + config := *c + if err := setConfigDefaults(&config); err != nil { + return nil, err + } + httpClient, err := rest.HTTPClientFor(&config) + if err != nil { + return nil, err + } + return NewForConfigAndClient(&config, httpClient) +} + +// NewForConfigAndClient creates a new ApiextensionsV1beta1Client for the given config and http client. +// Note the http client provided takes precedence over the configured transport values. +func NewForConfigAndClient(c *rest.Config, h *http.Client) (*ApiextensionsV1beta1Client, error) { + config := *c + if err := setConfigDefaults(&config); err != nil { + return nil, err + } + client, err := rest.RESTClientForConfigAndClient(&config, h) + if err != nil { + return nil, err + } + return &ApiextensionsV1beta1Client{client}, nil +} + +// NewForConfigOrDie creates a new ApiextensionsV1beta1Client for the given config and +// panics if there is an error in the config. +func NewForConfigOrDie(c *rest.Config) *ApiextensionsV1beta1Client { + client, err := NewForConfig(c) + if err != nil { + panic(err) + } + return client +} + +// New creates a new ApiextensionsV1beta1Client for the given RESTClient. +func New(c rest.Interface) *ApiextensionsV1beta1Client { + return &ApiextensionsV1beta1Client{c} +} + +func setConfigDefaults(config *rest.Config) error { + gv := v1beta1.SchemeGroupVersion + config.GroupVersion = &gv + config.APIPath = "/apis" + config.NegotiatedSerializer = scheme.Codecs.WithoutConversion() + + if config.UserAgent == "" { + config.UserAgent = rest.DefaultKubernetesUserAgent() + } + + return nil +} + +// RESTClient returns a RESTClient that is used to communicate +// with API server by this client implementation. +func (c *ApiextensionsV1beta1Client) RESTClient() rest.Interface { + if c == nil { + return nil + } + return c.restClient +} diff --git a/vendor/k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset/typed/apiextensions/v1beta1/customresourcedefinition.go b/vendor/k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset/typed/apiextensions/v1beta1/customresourcedefinition.go new file mode 100644 index 000000000..2d16ca709 --- /dev/null +++ b/vendor/k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset/typed/apiextensions/v1beta1/customresourcedefinition.go @@ -0,0 +1,184 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +package v1beta1 + +import ( + "context" + "time" + + v1beta1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1" + scheme "k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset/scheme" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + rest "k8s.io/client-go/rest" +) + +// CustomResourceDefinitionsGetter has a method to return a CustomResourceDefinitionInterface. +// A group's client should implement this interface. +type CustomResourceDefinitionsGetter interface { + CustomResourceDefinitions() CustomResourceDefinitionInterface +} + +// CustomResourceDefinitionInterface has methods to work with CustomResourceDefinition resources. +type CustomResourceDefinitionInterface interface { + Create(ctx context.Context, customResourceDefinition *v1beta1.CustomResourceDefinition, opts v1.CreateOptions) (*v1beta1.CustomResourceDefinition, error) + Update(ctx context.Context, customResourceDefinition *v1beta1.CustomResourceDefinition, opts v1.UpdateOptions) (*v1beta1.CustomResourceDefinition, error) + UpdateStatus(ctx context.Context, customResourceDefinition *v1beta1.CustomResourceDefinition, opts v1.UpdateOptions) (*v1beta1.CustomResourceDefinition, error) + Delete(ctx context.Context, name string, opts v1.DeleteOptions) error + DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error + Get(ctx context.Context, name string, opts v1.GetOptions) (*v1beta1.CustomResourceDefinition, error) + List(ctx context.Context, opts v1.ListOptions) (*v1beta1.CustomResourceDefinitionList, error) + Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) + Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta1.CustomResourceDefinition, err error) + CustomResourceDefinitionExpansion +} + +// customResourceDefinitions implements CustomResourceDefinitionInterface +type customResourceDefinitions struct { + client rest.Interface +} + +// newCustomResourceDefinitions returns a CustomResourceDefinitions +func newCustomResourceDefinitions(c *ApiextensionsV1beta1Client) *customResourceDefinitions { + return &customResourceDefinitions{ + client: c.RESTClient(), + } +} + +// Get takes name of the customResourceDefinition, and returns the corresponding customResourceDefinition object, and an error if there is any. +func (c *customResourceDefinitions) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1beta1.CustomResourceDefinition, err error) { + result = &v1beta1.CustomResourceDefinition{} + err = c.client.Get(). + Resource("customresourcedefinitions"). + Name(name). + VersionedParams(&options, scheme.ParameterCodec). + Do(ctx). + Into(result) + return +} + +// List takes label and field selectors, and returns the list of CustomResourceDefinitions that match those selectors. 
+func (c *customResourceDefinitions) List(ctx context.Context, opts v1.ListOptions) (result *v1beta1.CustomResourceDefinitionList, err error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } + result = &v1beta1.CustomResourceDefinitionList{} + err = c.client.Get(). + Resource("customresourcedefinitions"). + VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). + Do(ctx). + Into(result) + return +} + +// Watch returns a watch.Interface that watches the requested customResourceDefinitions. +func (c *customResourceDefinitions) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } + opts.Watch = true + return c.client.Get(). + Resource("customresourcedefinitions"). + VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). + Watch(ctx) +} + +// Create takes the representation of a customResourceDefinition and creates it. Returns the server's representation of the customResourceDefinition, and an error, if there is any. +func (c *customResourceDefinitions) Create(ctx context.Context, customResourceDefinition *v1beta1.CustomResourceDefinition, opts v1.CreateOptions) (result *v1beta1.CustomResourceDefinition, err error) { + result = &v1beta1.CustomResourceDefinition{} + err = c.client.Post(). + Resource("customresourcedefinitions"). + VersionedParams(&opts, scheme.ParameterCodec). + Body(customResourceDefinition). + Do(ctx). + Into(result) + return +} + +// Update takes the representation of a customResourceDefinition and updates it. Returns the server's representation of the customResourceDefinition, and an error, if there is any. +func (c *customResourceDefinitions) Update(ctx context.Context, customResourceDefinition *v1beta1.CustomResourceDefinition, opts v1.UpdateOptions) (result *v1beta1.CustomResourceDefinition, err error) { + result = &v1beta1.CustomResourceDefinition{} + err = c.client.Put(). + Resource("customresourcedefinitions"). + Name(customResourceDefinition.Name). + VersionedParams(&opts, scheme.ParameterCodec). + Body(customResourceDefinition). + Do(ctx). + Into(result) + return +} + +// UpdateStatus was generated because the type contains a Status member. +// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). +func (c *customResourceDefinitions) UpdateStatus(ctx context.Context, customResourceDefinition *v1beta1.CustomResourceDefinition, opts v1.UpdateOptions) (result *v1beta1.CustomResourceDefinition, err error) { + result = &v1beta1.CustomResourceDefinition{} + err = c.client.Put(). + Resource("customresourcedefinitions"). + Name(customResourceDefinition.Name). + SubResource("status"). + VersionedParams(&opts, scheme.ParameterCodec). + Body(customResourceDefinition). + Do(ctx). + Into(result) + return +} + +// Delete takes name of the customResourceDefinition and deletes it. Returns an error if one occurs. +func (c *customResourceDefinitions) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { + return c.client.Delete(). + Resource("customresourcedefinitions"). + Name(name). + Body(&opts). + Do(ctx). + Error() +} + +// DeleteCollection deletes a collection of objects. 
+func (c *customResourceDefinitions) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { + var timeout time.Duration + if listOpts.TimeoutSeconds != nil { + timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second + } + return c.client.Delete(). + Resource("customresourcedefinitions"). + VersionedParams(&listOpts, scheme.ParameterCodec). + Timeout(timeout). + Body(&opts). + Do(ctx). + Error() +} + +// Patch applies the patch and returns the patched customResourceDefinition. +func (c *customResourceDefinitions) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta1.CustomResourceDefinition, err error) { + result = &v1beta1.CustomResourceDefinition{} + err = c.client.Patch(pt). + Resource("customresourcedefinitions"). + Name(name). + SubResource(subresources...). + VersionedParams(&opts, scheme.ParameterCodec). + Body(data). + Do(ctx). + Into(result) + return +} diff --git a/vendor/k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset/typed/apiextensions/v1beta1/doc.go b/vendor/k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset/typed/apiextensions/v1beta1/doc.go new file mode 100644 index 000000000..771101956 --- /dev/null +++ b/vendor/k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset/typed/apiextensions/v1beta1/doc.go @@ -0,0 +1,20 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +// This package has the automatically generated typed clients. +package v1beta1 diff --git a/vendor/k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset/typed/apiextensions/v1beta1/generated_expansion.go b/vendor/k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset/typed/apiextensions/v1beta1/generated_expansion.go new file mode 100644 index 000000000..2a989d4be --- /dev/null +++ b/vendor/k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset/typed/apiextensions/v1beta1/generated_expansion.go @@ -0,0 +1,21 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. 
+ +package v1beta1 + +type CustomResourceDefinitionExpansion interface{} diff --git a/vendor/k8s.io/apimachinery/pkg/apis/meta/internalversion/scheme/doc.go b/vendor/k8s.io/apimachinery/pkg/apis/meta/internalversion/scheme/doc.go new file mode 100644 index 000000000..a45fa2a8a --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/apis/meta/internalversion/scheme/doc.go @@ -0,0 +1,17 @@ +/* +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package scheme // import "k8s.io/apimachinery/pkg/apis/meta/internalversion/scheme" diff --git a/vendor/k8s.io/apimachinery/pkg/apis/meta/internalversion/scheme/register.go b/vendor/k8s.io/apimachinery/pkg/apis/meta/internalversion/scheme/register.go new file mode 100644 index 000000000..472a9aeb2 --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/apis/meta/internalversion/scheme/register.go @@ -0,0 +1,39 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package scheme + +import ( + "k8s.io/apimachinery/pkg/apis/meta/internalversion" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/serializer" + utilruntime "k8s.io/apimachinery/pkg/util/runtime" +) + +// Scheme is the registry for any type that adheres to the meta API spec. +var scheme = runtime.NewScheme() + +// Codecs provides access to encoding and decoding for the scheme. +var Codecs = serializer.NewCodecFactory(scheme) + +// ParameterCodec handles versioning of objects that are converted to query parameters. +var ParameterCodec = runtime.NewParameterCodec(scheme) + +// Unlike other API groups, meta internal knows about all meta external versions, but keeps +// the logic for conversion private. +func init() { + utilruntime.Must(internalversion.AddToScheme(scheme)) +} diff --git a/vendor/k8s.io/client-go/metadata/interface.go b/vendor/k8s.io/client-go/metadata/interface.go new file mode 100644 index 000000000..127c39501 --- /dev/null +++ b/vendor/k8s.io/client-go/metadata/interface.go @@ -0,0 +1,49 @@ +/* +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+See the License for the specific language governing permissions and +limitations under the License. +*/ + +package metadata + +import ( + "context" + + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/apimachinery/pkg/types" + "k8s.io/apimachinery/pkg/watch" +) + +// Interface allows a caller to get the metadata (in the form of PartialObjectMetadata objects) +// from any Kubernetes compatible resource API. +type Interface interface { + Resource(resource schema.GroupVersionResource) Getter +} + +// ResourceInterface contains the set of methods that may be invoked on objects by their metadata. +// Update is not supported by the server, but Patch can be used for the actions Update would handle. +type ResourceInterface interface { + Delete(ctx context.Context, name string, options metav1.DeleteOptions, subresources ...string) error + DeleteCollection(ctx context.Context, options metav1.DeleteOptions, listOptions metav1.ListOptions) error + Get(ctx context.Context, name string, options metav1.GetOptions, subresources ...string) (*metav1.PartialObjectMetadata, error) + List(ctx context.Context, opts metav1.ListOptions) (*metav1.PartialObjectMetadataList, error) + Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) + Patch(ctx context.Context, name string, pt types.PatchType, data []byte, options metav1.PatchOptions, subresources ...string) (*metav1.PartialObjectMetadata, error) +} + +// Getter handles both namespaced and non-namespaced resource types consistently. +type Getter interface { + Namespace(string) ResourceInterface + ResourceInterface +} diff --git a/vendor/k8s.io/client-go/metadata/metadata.go b/vendor/k8s.io/client-go/metadata/metadata.go new file mode 100644 index 000000000..8152aa124 --- /dev/null +++ b/vendor/k8s.io/client-go/metadata/metadata.go @@ -0,0 +1,331 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package metadata + +import ( + "context" + "encoding/json" + "fmt" + "net/http" + "time" + + "k8s.io/klog/v2" + + metainternalversionscheme "k8s.io/apimachinery/pkg/apis/meta/internalversion/scheme" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/apimachinery/pkg/runtime/serializer" + "k8s.io/apimachinery/pkg/types" + "k8s.io/apimachinery/pkg/watch" + "k8s.io/client-go/rest" +) + +var deleteScheme = runtime.NewScheme() +var parameterScheme = runtime.NewScheme() +var deleteOptionsCodec = serializer.NewCodecFactory(deleteScheme) +var dynamicParameterCodec = runtime.NewParameterCodec(parameterScheme) + +var versionV1 = schema.GroupVersion{Version: "v1"} + +func init() { + metav1.AddToGroupVersion(parameterScheme, versionV1) + metav1.AddToGroupVersion(deleteScheme, versionV1) +} + +// Client allows callers to retrieve the object metadata for any +// Kubernetes-compatible API endpoint. 
The client uses the +// meta.k8s.io/v1 PartialObjectMetadata resource to more efficiently +// retrieve just the necessary metadata, but on older servers +// (Kubernetes 1.14 and before) will retrieve the object and then +// convert the metadata. +type Client struct { + client *rest.RESTClient +} + +var _ Interface = &Client{} + +// ConfigFor returns a copy of the provided config with the +// appropriate metadata client defaults set. +func ConfigFor(inConfig *rest.Config) *rest.Config { + config := rest.CopyConfig(inConfig) + config.AcceptContentTypes = "application/vnd.kubernetes.protobuf,application/json" + config.ContentType = "application/vnd.kubernetes.protobuf" + config.NegotiatedSerializer = metainternalversionscheme.Codecs.WithoutConversion() + if config.UserAgent == "" { + config.UserAgent = rest.DefaultKubernetesUserAgent() + } + return config +} + +// NewForConfigOrDie creates a new metadata client for the given config and +// panics if there is an error in the config. +func NewForConfigOrDie(c *rest.Config) Interface { + ret, err := NewForConfig(c) + if err != nil { + panic(err) + } + return ret +} + +// NewForConfig creates a new metadata client that can retrieve object +// metadata details about any Kubernetes object (core, aggregated, or custom +// resource based) in the form of PartialObjectMetadata objects, or returns +// an error. +// NewForConfig is equivalent to NewForConfigAndClient(c, httpClient), +// where httpClient was generated with rest.HTTPClientFor(c). +func NewForConfig(inConfig *rest.Config) (Interface, error) { + config := ConfigFor(inConfig) + + httpClient, err := rest.HTTPClientFor(config) + if err != nil { + return nil, err + } + return NewForConfigAndClient(config, httpClient) +} + +// NewForConfigAndClient creates a new metadata client for the given config and http client. +// Note the http client provided takes precedence over the configured transport values. +func NewForConfigAndClient(inConfig *rest.Config, h *http.Client) (Interface, error) { + config := ConfigFor(inConfig) + // for serializing the options + config.GroupVersion = &schema.GroupVersion{} + config.APIPath = "/this-value-should-never-be-sent" + + restClient, err := rest.RESTClientForConfigAndClient(config, h) + if err != nil { + return nil, err + } + + return &Client{client: restClient}, nil +} + +type client struct { + client *Client + namespace string + resource schema.GroupVersionResource +} + +// Resource returns an interface that can access cluster or namespace +// scoped instances of resource. +func (c *Client) Resource(resource schema.GroupVersionResource) Getter { + return &client{client: c, resource: resource} +} + +// Namespace returns an interface that can access namespace-scoped instances of the +// provided resource. +func (c *client) Namespace(ns string) ResourceInterface { + ret := *c + ret.namespace = ns + return &ret +} + +// Delete removes the provided resource from the server. +func (c *client) Delete(ctx context.Context, name string, opts metav1.DeleteOptions, subresources ...string) error { + if len(name) == 0 { + return fmt.Errorf("name is required") + } + // if DeleteOptions are delivered to Negotiator for serialization, + // HTTP-Request header will bring "Content-Type: application/vnd.kubernetes.protobuf" + // apiextensions-apiserver uses unstructuredNegotiatedSerializer to decode the input, + // server-side will reply with 406 errors. 
+ // The special treatment here is to be compatible with CRD Handler + // see: https://github.com/kubernetes/kubernetes/blob/1a845ccd076bbf1b03420fe694c85a5cd3bd6bed/staging/src/k8s.io/apiextensions-apiserver/pkg/apiserver/customresource_handler.go#L843 + deleteOptionsByte, err := runtime.Encode(deleteOptionsCodec.LegacyCodec(schema.GroupVersion{Version: "v1"}), &opts) + if err != nil { + return err + } + + result := c.client.client. + Delete(). + AbsPath(append(c.makeURLSegments(name), subresources...)...). + SetHeader("Content-Type", runtime.ContentTypeJSON). + Body(deleteOptionsByte). + Do(ctx) + return result.Error() +} + +// DeleteCollection triggers deletion of all resources in the specified scope (namespace or cluster). +func (c *client) DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOptions metav1.ListOptions) error { + // See comment on Delete + deleteOptionsByte, err := runtime.Encode(deleteOptionsCodec.LegacyCodec(schema.GroupVersion{Version: "v1"}), &opts) + if err != nil { + return err + } + + result := c.client.client. + Delete(). + AbsPath(c.makeURLSegments("")...). + SetHeader("Content-Type", runtime.ContentTypeJSON). + Body(deleteOptionsByte). + SpecificallyVersionedParams(&listOptions, dynamicParameterCodec, versionV1). + Do(ctx) + return result.Error() +} + +// Get returns the resource with name from the specified scope (namespace or cluster). +func (c *client) Get(ctx context.Context, name string, opts metav1.GetOptions, subresources ...string) (*metav1.PartialObjectMetadata, error) { + if len(name) == 0 { + return nil, fmt.Errorf("name is required") + } + result := c.client.client.Get().AbsPath(append(c.makeURLSegments(name), subresources...)...). + SetHeader("Accept", "application/vnd.kubernetes.protobuf;as=PartialObjectMetadata;g=meta.k8s.io;v=v1,application/json;as=PartialObjectMetadata;g=meta.k8s.io;v=v1,application/json"). + SpecificallyVersionedParams(&opts, dynamicParameterCodec, versionV1). + Do(ctx) + if err := result.Error(); err != nil { + return nil, err + } + obj, err := result.Get() + if runtime.IsNotRegisteredError(err) { + klog.V(5).Infof("Unable to retrieve PartialObjectMetadata: %#v", err) + rawBytes, err := result.Raw() + if err != nil { + return nil, err + } + var partial metav1.PartialObjectMetadata + if err := json.Unmarshal(rawBytes, &partial); err != nil { + return nil, fmt.Errorf("unable to decode returned object as PartialObjectMetadata: %v", err) + } + if !isLikelyObjectMetadata(&partial) { + return nil, fmt.Errorf("object does not appear to match the ObjectMeta schema: %#v", partial) + } + partial.TypeMeta = metav1.TypeMeta{} + return &partial, nil + } + if err != nil { + return nil, err + } + partial, ok := obj.(*metav1.PartialObjectMetadata) + if !ok { + return nil, fmt.Errorf("unexpected object, expected PartialObjectMetadata but got %T", obj) + } + return partial, nil +} + +// List returns all resources within the specified scope (namespace or cluster). +func (c *client) List(ctx context.Context, opts metav1.ListOptions) (*metav1.PartialObjectMetadataList, error) { + result := c.client.client.Get().AbsPath(c.makeURLSegments("")...). + SetHeader("Accept", "application/vnd.kubernetes.protobuf;as=PartialObjectMetadataList;g=meta.k8s.io;v=v1,application/json;as=PartialObjectMetadataList;g=meta.k8s.io;v=v1,application/json"). + SpecificallyVersionedParams(&opts, dynamicParameterCodec, versionV1). 
+ Do(ctx) + if err := result.Error(); err != nil { + return nil, err + } + obj, err := result.Get() + if runtime.IsNotRegisteredError(err) { + klog.V(5).Infof("Unable to retrieve PartialObjectMetadataList: %#v", err) + rawBytes, err := result.Raw() + if err != nil { + return nil, err + } + var partial metav1.PartialObjectMetadataList + if err := json.Unmarshal(rawBytes, &partial); err != nil { + return nil, fmt.Errorf("unable to decode returned object as PartialObjectMetadataList: %v", err) + } + partial.TypeMeta = metav1.TypeMeta{} + return &partial, nil + } + if err != nil { + return nil, err + } + partial, ok := obj.(*metav1.PartialObjectMetadataList) + if !ok { + return nil, fmt.Errorf("unexpected object, expected PartialObjectMetadata but got %T", obj) + } + return partial, nil +} + +// Watch finds all changes to the resources in the specified scope (namespace or cluster). +func (c *client) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } + opts.Watch = true + return c.client.client.Get(). + AbsPath(c.makeURLSegments("")...). + SetHeader("Accept", "application/vnd.kubernetes.protobuf;as=PartialObjectMetadata;g=meta.k8s.io;v=v1,application/json;as=PartialObjectMetadata;g=meta.k8s.io;v=v1,application/json"). + SpecificallyVersionedParams(&opts, dynamicParameterCodec, versionV1). + Timeout(timeout). + Watch(ctx) +} + +// Patch modifies the named resource in the specified scope (namespace or cluster). +func (c *client) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (*metav1.PartialObjectMetadata, error) { + if len(name) == 0 { + return nil, fmt.Errorf("name is required") + } + result := c.client.client. + Patch(pt). + AbsPath(append(c.makeURLSegments(name), subresources...)...). + Body(data). + SetHeader("Accept", "application/vnd.kubernetes.protobuf;as=PartialObjectMetadata;g=meta.k8s.io;v=v1,application/json;as=PartialObjectMetadata;g=meta.k8s.io;v=v1,application/json"). + SpecificallyVersionedParams(&opts, dynamicParameterCodec, versionV1). 
+ Do(ctx) + if err := result.Error(); err != nil { + return nil, err + } + obj, err := result.Get() + if runtime.IsNotRegisteredError(err) { + rawBytes, err := result.Raw() + if err != nil { + return nil, err + } + var partial metav1.PartialObjectMetadata + if err := json.Unmarshal(rawBytes, &partial); err != nil { + return nil, fmt.Errorf("unable to decode returned object as PartialObjectMetadata: %v", err) + } + if !isLikelyObjectMetadata(&partial) { + return nil, fmt.Errorf("object does not appear to match the ObjectMeta schema") + } + partial.TypeMeta = metav1.TypeMeta{} + return &partial, nil + } + if err != nil { + return nil, err + } + partial, ok := obj.(*metav1.PartialObjectMetadata) + if !ok { + return nil, fmt.Errorf("unexpected object, expected PartialObjectMetadata but got %T", obj) + } + return partial, nil +} + +func (c *client) makeURLSegments(name string) []string { + url := []string{} + if len(c.resource.Group) == 0 { + url = append(url, "api") + } else { + url = append(url, "apis", c.resource.Group) + } + url = append(url, c.resource.Version) + + if len(c.namespace) > 0 { + url = append(url, "namespaces", c.namespace) + } + url = append(url, c.resource.Resource) + + if len(name) > 0 { + url = append(url, name) + } + + return url +} + +func isLikelyObjectMetadata(meta *metav1.PartialObjectMetadata) bool { + return len(meta.UID) > 0 || !meta.CreationTimestamp.IsZero() || len(meta.Name) > 0 || len(meta.GenerateName) > 0 +} diff --git a/vendor/k8s.io/client-go/util/retry/OWNERS b/vendor/k8s.io/client-go/util/retry/OWNERS new file mode 100644 index 000000000..75736b5aa --- /dev/null +++ b/vendor/k8s.io/client-go/util/retry/OWNERS @@ -0,0 +1,4 @@ +# See the OWNERS docs at https://go.k8s.io/owners + +reviewers: + - caesarxuchao diff --git a/vendor/k8s.io/client-go/util/retry/util.go b/vendor/k8s.io/client-go/util/retry/util.go new file mode 100644 index 000000000..772f5bd7a --- /dev/null +++ b/vendor/k8s.io/client-go/util/retry/util.go @@ -0,0 +1,105 @@ +/* +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package retry + +import ( + "time" + + "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/util/wait" +) + +// DefaultRetry is the recommended retry for a conflict where multiple clients +// are making changes to the same resource. +var DefaultRetry = wait.Backoff{ + Steps: 5, + Duration: 10 * time.Millisecond, + Factor: 1.0, + Jitter: 0.1, +} + +// DefaultBackoff is the recommended backoff for a conflict where a client +// may be attempting to make an unrelated modification to a resource under +// active management by one or more controllers. +var DefaultBackoff = wait.Backoff{ + Steps: 4, + Duration: 10 * time.Millisecond, + Factor: 5.0, + Jitter: 0.1, +} + +// OnError allows the caller to retry fn in case the error returned by fn is retriable +// according to the provided function. backoff defines the maximum retries and the wait +// interval between two retries. 
+func OnError(backoff wait.Backoff, retriable func(error) bool, fn func() error) error { + var lastErr error + err := wait.ExponentialBackoff(backoff, func() (bool, error) { + err := fn() + switch { + case err == nil: + return true, nil + case retriable(err): + lastErr = err + return false, nil + default: + return false, err + } + }) + if err == wait.ErrWaitTimeout { + err = lastErr + } + return err +} + +// RetryOnConflict is used to make an update to a resource when you have to worry about +// conflicts caused by other code making unrelated updates to the resource at the same +// time. fn should fetch the resource to be modified, make appropriate changes to it, try +// to update it, and return (unmodified) the error from the update function. On a +// successful update, RetryOnConflict will return nil. If the update function returns a +// "Conflict" error, RetryOnConflict will wait some amount of time as described by +// backoff, and then try again. On a non-"Conflict" error, or if it retries too many times +// and gives up, RetryOnConflict will return an error to the caller. +// +// err := retry.RetryOnConflict(retry.DefaultRetry, func() error { +// // Fetch the resource here; you need to refetch it on every try, since +// // if you got a conflict on the last update attempt then you need to get +// // the current version before making your own changes. +// pod, err := c.Pods("mynamespace").Get(name, metav1.GetOptions{}) +// if err != nil { +// return err +// } +// +// // Make whatever updates to the resource are needed +// pod.Status.Phase = v1.PodFailed +// +// // Try to update +// _, err = c.Pods("mynamespace").UpdateStatus(pod) +// // You have to return err itself here (not wrapped inside another error) +// // so that RetryOnConflict can identify it correctly. +// return err +// }) +// if err != nil { +// // May be conflict if max retries were hit, or may be something unrelated +// // like permissions or a network error +// return err +// } +// ... +// +// TODO: Make Backoff an interface? 
+func RetryOnConflict(backoff wait.Backoff, fn func() error) error { + return OnError(backoff, errors.IsConflict, fn) +} diff --git a/vendor/modules.txt b/vendor/modules.txt index f59dd4e49..d54878430 100644 --- a/vendor/modules.txt +++ b/vendor/modules.txt @@ -755,6 +755,10 @@ k8s.io/api/storage/v1beta1 k8s.io/apiextensions-apiserver/pkg/apis/apiextensions k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1 k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1 +k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset +k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset/scheme +k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset/typed/apiextensions/v1 +k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset/typed/apiextensions/v1beta1 # k8s.io/apimachinery v0.24.1 ## explicit; go 1.16 k8s.io/apimachinery/pkg/api/equality @@ -762,6 +766,7 @@ k8s.io/apimachinery/pkg/api/errors k8s.io/apimachinery/pkg/api/meta k8s.io/apimachinery/pkg/api/resource k8s.io/apimachinery/pkg/apis/meta/internalversion +k8s.io/apimachinery/pkg/apis/meta/internalversion/scheme k8s.io/apimachinery/pkg/apis/meta/v1 k8s.io/apimachinery/pkg/apis/meta/v1/unstructured k8s.io/apimachinery/pkg/apis/meta/v1beta1 @@ -896,6 +901,7 @@ k8s.io/client-go/kubernetes/typed/scheduling/v1beta1 k8s.io/client-go/kubernetes/typed/storage/v1 k8s.io/client-go/kubernetes/typed/storage/v1alpha1 k8s.io/client-go/kubernetes/typed/storage/v1beta1 +k8s.io/client-go/metadata k8s.io/client-go/openapi k8s.io/client-go/pkg/apis/clientauthentication k8s.io/client-go/pkg/apis/clientauthentication/install @@ -930,6 +936,7 @@ k8s.io/client-go/util/flowcontrol k8s.io/client-go/util/homedir k8s.io/client-go/util/jsonpath k8s.io/client-go/util/keyutil +k8s.io/client-go/util/retry k8s.io/client-go/util/workqueue # k8s.io/code-generator v0.24.1 ## explicit; go 1.16 @@ -1016,6 +1023,22 @@ k8s.io/utils/net k8s.io/utils/pointer k8s.io/utils/strings/slices k8s.io/utils/trace +# sigs.k8s.io/controller-runtime v0.11.1 +## explicit; go 1.17 +sigs.k8s.io/controller-runtime/pkg/client +sigs.k8s.io/controller-runtime/pkg/client/apiutil +sigs.k8s.io/controller-runtime/pkg/client/config +sigs.k8s.io/controller-runtime/pkg/conversion +sigs.k8s.io/controller-runtime/pkg/envtest +sigs.k8s.io/controller-runtime/pkg/internal/flock +sigs.k8s.io/controller-runtime/pkg/internal/log +sigs.k8s.io/controller-runtime/pkg/internal/objectutil +sigs.k8s.io/controller-runtime/pkg/internal/testing/addr +sigs.k8s.io/controller-runtime/pkg/internal/testing/certs +sigs.k8s.io/controller-runtime/pkg/internal/testing/controlplane +sigs.k8s.io/controller-runtime/pkg/internal/testing/process +sigs.k8s.io/controller-runtime/pkg/log +sigs.k8s.io/controller-runtime/pkg/webhook/conversion # sigs.k8s.io/controller-tools v0.8.0 ## explicit; go 1.17 sigs.k8s.io/controller-tools/cmd/controller-gen diff --git a/vendor/sigs.k8s.io/controller-runtime/LICENSE b/vendor/sigs.k8s.io/controller-runtime/LICENSE new file mode 100644 index 000000000..8dada3eda --- /dev/null +++ b/vendor/sigs.k8s.io/controller-runtime/LICENSE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. 
+ + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. 
This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "{}" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright {yyyy} {name of copyright owner} + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
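The `k8s.io/client-go/metadata` and `k8s.io/client-go/util/retry` packages vendored above are new dependencies (see the `vendor/modules.txt` hunk). As a rough usage sketch of the metadata-only client, not code from this PR, with an illustrative GroupVersionResource and illustrative object names:

```go
package example

import (
	"context"
	"fmt"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/runtime/schema"
	"k8s.io/client-go/metadata"
	"k8s.io/client-go/rest"
)

func listEntryMetadata(cfg *rest.Config) error {
	// NewForConfig applies the protobuf/JSON content-type defaults from ConfigFor.
	mc, err := metadata.NewForConfig(cfg)
	if err != nil {
		return err
	}

	// Illustrative resource; any group/version/resource served by the API server works.
	gvr := schema.GroupVersionResource{Group: "dns.gardener.cloud", Version: "v1alpha1", Resource: "dnsentries"}
	ctx := context.Background()

	// Get returns only the object's metadata as *metav1.PartialObjectMetadata.
	m, err := mc.Resource(gvr).Namespace("default").Get(ctx, "instance-a", metav1.GetOptions{})
	if err != nil {
		return err
	}
	fmt.Println("uid:", m.GetUID())

	// List returns a *metav1.PartialObjectMetadataList for the namespace.
	list, err := mc.Resource(gvr).Namespace("default").List(ctx, metav1.ListOptions{})
	if err != nil {
		return err
	}
	fmt.Println("items:", len(list.Items))
	return nil
}
```

For the retry helpers, the `RetryOnConflict` doc comment above already carries its own worked example.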
diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/client/apiutil/apimachinery.go b/vendor/sigs.k8s.io/controller-runtime/pkg/client/apiutil/apimachinery.go new file mode 100644 index 000000000..c92b0eaae --- /dev/null +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/client/apiutil/apimachinery.go @@ -0,0 +1,196 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Package apiutil contains utilities for working with raw Kubernetes +// API machinery, such as creating RESTMappers and raw REST clients, +// and extracting the GVK of an object. +package apiutil + +import ( + "fmt" + "reflect" + "sync" + + "k8s.io/apimachinery/pkg/api/meta" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/apimachinery/pkg/runtime/serializer" + "k8s.io/client-go/discovery" + clientgoscheme "k8s.io/client-go/kubernetes/scheme" + "k8s.io/client-go/rest" + "k8s.io/client-go/restmapper" +) + +var ( + protobufScheme = runtime.NewScheme() + protobufSchemeLock sync.RWMutex +) + +func init() { + // Currently only enabled for built-in resources which are guaranteed to implement Protocol Buffers. + // For custom resources, CRDs can not support Protocol Buffers but Aggregated API can. + // See doc: https://kubernetes.io/docs/concepts/extend-kubernetes/api-extension/custom-resources/#advanced-features-and-flexibility + if err := clientgoscheme.AddToScheme(protobufScheme); err != nil { + panic(err) + } +} + +// AddToProtobufScheme add the given SchemeBuilder into protobufScheme, which should +// be additional types that do support protobuf. +func AddToProtobufScheme(addToScheme func(*runtime.Scheme) error) error { + protobufSchemeLock.Lock() + defer protobufSchemeLock.Unlock() + return addToScheme(protobufScheme) +} + +// NewDiscoveryRESTMapper constructs a new RESTMapper based on discovery +// information fetched by a new client with the given config. +func NewDiscoveryRESTMapper(c *rest.Config) (meta.RESTMapper, error) { + // Get a mapper + dc, err := discovery.NewDiscoveryClientForConfig(c) + if err != nil { + return nil, err + } + gr, err := restmapper.GetAPIGroupResources(dc) + if err != nil { + return nil, err + } + return restmapper.NewDiscoveryRESTMapper(gr), nil +} + +// GVKForObject finds the GroupVersionKind associated with the given object, if there is only a single such GVK. +func GVKForObject(obj runtime.Object, scheme *runtime.Scheme) (schema.GroupVersionKind, error) { + // TODO(directxman12): do we want to generalize this to arbitrary container types? + // I think we'd need a generalized form of scheme or something. 
It's a + // shame there's not a reliable "GetGVK" interface that works by default + // for unpopulated static types and populated "dynamic" types + // (unstructured, partial, etc) + + // check for PartialObjectMetadata, which is analogous to unstructured, but isn't handled by ObjectKinds + _, isPartial := obj.(*metav1.PartialObjectMetadata) //nolint:ifshort + _, isPartialList := obj.(*metav1.PartialObjectMetadataList) + if isPartial || isPartialList { + // we require that the GVK be populated in order to recognize the object + gvk := obj.GetObjectKind().GroupVersionKind() + if len(gvk.Kind) == 0 { + return schema.GroupVersionKind{}, runtime.NewMissingKindErr("unstructured object has no kind") + } + if len(gvk.Version) == 0 { + return schema.GroupVersionKind{}, runtime.NewMissingVersionErr("unstructured object has no version") + } + return gvk, nil + } + + gvks, isUnversioned, err := scheme.ObjectKinds(obj) + if err != nil { + return schema.GroupVersionKind{}, err + } + if isUnversioned { + return schema.GroupVersionKind{}, fmt.Errorf("cannot create group-version-kind for unversioned type %T", obj) + } + + if len(gvks) < 1 { + return schema.GroupVersionKind{}, fmt.Errorf("no group-version-kinds associated with type %T", obj) + } + if len(gvks) > 1 { + // this should only trigger for things like metav1.XYZ -- + // normal versioned types should be fine + return schema.GroupVersionKind{}, fmt.Errorf( + "multiple group-version-kinds associated with type %T, refusing to guess at one", obj) + } + return gvks[0], nil +} + +// RESTClientForGVK constructs a new rest.Interface capable of accessing the resource associated +// with the given GroupVersionKind. The REST client will be configured to use the negotiated serializer from +// baseConfig, if set, otherwise a default serializer will be set. +func RESTClientForGVK(gvk schema.GroupVersionKind, isUnstructured bool, baseConfig *rest.Config, codecs serializer.CodecFactory) (rest.Interface, error) { + return rest.RESTClientFor(createRestConfig(gvk, isUnstructured, baseConfig, codecs)) +} + +// serializerWithDecodedGVK is a CodecFactory that overrides the DecoderToVersion of a WithoutConversionCodecFactory +// in order to avoid clearing the GVK from the decoded object. +// +// See https://github.com/kubernetes/kubernetes/issues/80609. +type serializerWithDecodedGVK struct { + serializer.WithoutConversionCodecFactory +} + +// DecoderToVersion returns an decoder that does not do conversion. +func (f serializerWithDecodedGVK) DecoderToVersion(serializer runtime.Decoder, _ runtime.GroupVersioner) runtime.Decoder { + return serializer +} + +// createRestConfig copies the base config and updates needed fields for a new rest config. +func createRestConfig(gvk schema.GroupVersionKind, isUnstructured bool, baseConfig *rest.Config, codecs serializer.CodecFactory) *rest.Config { + gv := gvk.GroupVersion() + + cfg := rest.CopyConfig(baseConfig) + cfg.GroupVersion = &gv + if gvk.Group == "" { + cfg.APIPath = "/api" + } else { + cfg.APIPath = "/apis" + } + if cfg.UserAgent == "" { + cfg.UserAgent = rest.DefaultKubernetesUserAgent() + } + // TODO(FillZpp): In the long run, we want to check discovery or something to make sure that this is actually true. + if cfg.ContentType == "" && !isUnstructured { + protobufSchemeLock.RLock() + if protobufScheme.Recognizes(gvk) { + cfg.ContentType = runtime.ContentTypeProtobuf + } + protobufSchemeLock.RUnlock() + } + + if isUnstructured { + // If the object is unstructured, we need to preserve the GVK information. 
+ // Use our own custom serializer. + cfg.NegotiatedSerializer = serializerWithDecodedGVK{serializer.WithoutConversionCodecFactory{CodecFactory: codecs}} + } else { + cfg.NegotiatedSerializer = serializerWithTargetZeroingDecode{NegotiatedSerializer: serializer.WithoutConversionCodecFactory{CodecFactory: codecs}} + } + + return cfg +} + +type serializerWithTargetZeroingDecode struct { + runtime.NegotiatedSerializer +} + +func (s serializerWithTargetZeroingDecode) DecoderToVersion(serializer runtime.Decoder, r runtime.GroupVersioner) runtime.Decoder { + return targetZeroingDecoder{upstream: s.NegotiatedSerializer.DecoderToVersion(serializer, r)} +} + +type targetZeroingDecoder struct { + upstream runtime.Decoder +} + +func (t targetZeroingDecoder) Decode(data []byte, defaults *schema.GroupVersionKind, into runtime.Object) (runtime.Object, *schema.GroupVersionKind, error) { + zero(into) + return t.upstream.Decode(data, defaults, into) +} + +// zero zeros the value of a pointer. +func zero(x interface{}) { + if x == nil { + return + } + res := reflect.ValueOf(x).Elem() + res.Set(reflect.Zero(res.Type())) +} diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/client/apiutil/dynamicrestmapper.go b/vendor/sigs.k8s.io/controller-runtime/pkg/client/apiutil/dynamicrestmapper.go new file mode 100644 index 000000000..56a00371f --- /dev/null +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/client/apiutil/dynamicrestmapper.go @@ -0,0 +1,285 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package apiutil + +import ( + "errors" + "sync" + + "golang.org/x/time/rate" + "k8s.io/apimachinery/pkg/api/meta" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/client-go/discovery" + "k8s.io/client-go/rest" + "k8s.io/client-go/restmapper" +) + +// dynamicRESTMapper is a RESTMapper that dynamically discovers resource +// types at runtime. +type dynamicRESTMapper struct { + mu sync.RWMutex // protects the following fields + staticMapper meta.RESTMapper + limiter *rate.Limiter + newMapper func() (meta.RESTMapper, error) + + lazy bool + // Used for lazy init. + initOnce sync.Once +} + +// DynamicRESTMapperOption is a functional option on the dynamicRESTMapper. +type DynamicRESTMapperOption func(*dynamicRESTMapper) error + +// WithLimiter sets the RESTMapper's underlying limiter to lim. +func WithLimiter(lim *rate.Limiter) DynamicRESTMapperOption { + return func(drm *dynamicRESTMapper) error { + drm.limiter = lim + return nil + } +} + +// WithLazyDiscovery prevents the RESTMapper from discovering REST mappings +// until an API call is made. +var WithLazyDiscovery DynamicRESTMapperOption = func(drm *dynamicRESTMapper) error { + drm.lazy = true + return nil +} + +// WithCustomMapper supports setting a custom RESTMapper refresher instead of +// the default method, which uses a discovery client. +// +// This exists mainly for testing, but can be useful if you need tighter control +// over how discovery is performed, which discovery endpoints are queried, etc. 
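A minimal sketch of constructing the dynamic RESTMapper defined in this file, assuming the lazy-discovery option declared above; the GroupVersionResource is illustrative and nothing here comes from the PR itself:

```go
package example

import (
	"fmt"

	"k8s.io/apimachinery/pkg/runtime/schema"
	"k8s.io/client-go/rest"
	"sigs.k8s.io/controller-runtime/pkg/client/apiutil"
)

func kindForDNSEntries(cfg *rest.Config) error {
	// Defer discovery until the first mapping request is made.
	mapper, err := apiutil.NewDynamicRESTMapper(cfg, apiutil.WithLazyDiscovery)
	if err != nil {
		return err
	}

	// On a NoResourceMatchError the mapper reloads discovery data
	// (subject to its rate limiter) and retries the lookup.
	gvk, err := mapper.KindFor(schema.GroupVersionResource{
		Group: "dns.gardener.cloud", Version: "v1alpha1", Resource: "dnsentries",
	})
	if err != nil {
		return err
	}
	fmt.Println("kind:", gvk.Kind)
	return nil
}
```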
+func WithCustomMapper(newMapper func() (meta.RESTMapper, error)) DynamicRESTMapperOption { + return func(drm *dynamicRESTMapper) error { + drm.newMapper = newMapper + return nil + } +} + +// NewDynamicRESTMapper returns a dynamic RESTMapper for cfg. The dynamic +// RESTMapper dynamically discovers resource types at runtime. opts +// configure the RESTMapper. +func NewDynamicRESTMapper(cfg *rest.Config, opts ...DynamicRESTMapperOption) (meta.RESTMapper, error) { + client, err := discovery.NewDiscoveryClientForConfig(cfg) + if err != nil { + return nil, err + } + drm := &dynamicRESTMapper{ + limiter: rate.NewLimiter(rate.Limit(defaultRefillRate), defaultLimitSize), + newMapper: func() (meta.RESTMapper, error) { + groupResources, err := restmapper.GetAPIGroupResources(client) + if err != nil { + return nil, err + } + return restmapper.NewDiscoveryRESTMapper(groupResources), nil + }, + } + for _, opt := range opts { + if err = opt(drm); err != nil { + return nil, err + } + } + if !drm.lazy { + if err := drm.setStaticMapper(); err != nil { + return nil, err + } + } + return drm, nil +} + +var ( + // defaultRefilRate is the default rate at which potential calls are + // added back to the "bucket" of allowed calls. + defaultRefillRate = 5 + // defaultLimitSize is the default starting/max number of potential calls + // per second. Once a call is used, it's added back to the bucket at a rate + // of defaultRefillRate per second. + defaultLimitSize = 5 +) + +// setStaticMapper sets drm's staticMapper by querying its client, regardless +// of reload backoff. +func (drm *dynamicRESTMapper) setStaticMapper() error { + newMapper, err := drm.newMapper() + if err != nil { + return err + } + drm.staticMapper = newMapper + return nil +} + +// init initializes drm only once if drm is lazy. +func (drm *dynamicRESTMapper) init() (err error) { + drm.initOnce.Do(func() { + if drm.lazy { + err = drm.setStaticMapper() + } + }) + return err +} + +// checkAndReload attempts to call the given callback, which is assumed to be dependent +// on the data in the restmapper. +// +// If the callback returns an error that matches the given error, it will attempt to reload +// the RESTMapper's data and re-call the callback once that's occurred. +// If the callback returns any other error, the function will return immediately regardless. +// +// It will take care of ensuring that reloads are rate-limited and that extraneous calls +// aren't made. If a reload would exceed the limiters rate, it returns the error return by +// the callback. +// It's thread-safe, and worries about thread-safety for the callback (so the callback does +// not need to attempt to lock the restmapper). +func (drm *dynamicRESTMapper) checkAndReload(needsReloadErr error, checkNeedsReload func() error) error { + // first, check the common path -- data is fresh enough + // (use an IIFE for the lock's defer) + err := func() error { + drm.mu.RLock() + defer drm.mu.RUnlock() + + return checkNeedsReload() + }() + + // NB(directxman12): `Is` and `As` have a confusing relationship -- + // `Is` is like `== or does this implement .Is`, whereas `As` says + // `can I type-assert into` + needsReload := errors.As(err, &needsReloadErr) + if !needsReload { + return err + } + + // if the data wasn't fresh, we'll need to try and update it, so grab the lock... + drm.mu.Lock() + defer drm.mu.Unlock() + + // ... 
and double-check that we didn't reload in the meantime + err = checkNeedsReload() + needsReload = errors.As(err, &needsReloadErr) + if !needsReload { + return err + } + + // we're still stale, so grab a rate-limit token if we can... + if !drm.limiter.Allow() { + // return error from static mapper here, we have refreshed often enough (exceeding rate of provided limiter) + // so that client's can handle this the same way as a "normal" NoResourceMatchError / NoKindMatchError + return err + } + + // ...reload... + if err := drm.setStaticMapper(); err != nil { + return err + } + + // ...and return the results of the closure regardless + return checkNeedsReload() +} + +// TODO: wrap reload errors on NoKindMatchError with go 1.13 errors. + +func (drm *dynamicRESTMapper) KindFor(resource schema.GroupVersionResource) (schema.GroupVersionKind, error) { + if err := drm.init(); err != nil { + return schema.GroupVersionKind{}, err + } + var gvk schema.GroupVersionKind + err := drm.checkAndReload(&meta.NoResourceMatchError{}, func() error { + var err error + gvk, err = drm.staticMapper.KindFor(resource) + return err + }) + return gvk, err +} + +func (drm *dynamicRESTMapper) KindsFor(resource schema.GroupVersionResource) ([]schema.GroupVersionKind, error) { + if err := drm.init(); err != nil { + return nil, err + } + var gvks []schema.GroupVersionKind + err := drm.checkAndReload(&meta.NoResourceMatchError{}, func() error { + var err error + gvks, err = drm.staticMapper.KindsFor(resource) + return err + }) + return gvks, err +} + +func (drm *dynamicRESTMapper) ResourceFor(input schema.GroupVersionResource) (schema.GroupVersionResource, error) { + if err := drm.init(); err != nil { + return schema.GroupVersionResource{}, err + } + + var gvr schema.GroupVersionResource + err := drm.checkAndReload(&meta.NoResourceMatchError{}, func() error { + var err error + gvr, err = drm.staticMapper.ResourceFor(input) + return err + }) + return gvr, err +} + +func (drm *dynamicRESTMapper) ResourcesFor(input schema.GroupVersionResource) ([]schema.GroupVersionResource, error) { + if err := drm.init(); err != nil { + return nil, err + } + var gvrs []schema.GroupVersionResource + err := drm.checkAndReload(&meta.NoResourceMatchError{}, func() error { + var err error + gvrs, err = drm.staticMapper.ResourcesFor(input) + return err + }) + return gvrs, err +} + +func (drm *dynamicRESTMapper) RESTMapping(gk schema.GroupKind, versions ...string) (*meta.RESTMapping, error) { + if err := drm.init(); err != nil { + return nil, err + } + var mapping *meta.RESTMapping + err := drm.checkAndReload(&meta.NoKindMatchError{}, func() error { + var err error + mapping, err = drm.staticMapper.RESTMapping(gk, versions...) + return err + }) + return mapping, err +} + +func (drm *dynamicRESTMapper) RESTMappings(gk schema.GroupKind, versions ...string) ([]*meta.RESTMapping, error) { + if err := drm.init(); err != nil { + return nil, err + } + var mappings []*meta.RESTMapping + err := drm.checkAndReload(&meta.NoKindMatchError{}, func() error { + var err error + mappings, err = drm.staticMapper.RESTMappings(gk, versions...) 
+ return err + }) + return mappings, err +} + +func (drm *dynamicRESTMapper) ResourceSingularizer(resource string) (string, error) { + if err := drm.init(); err != nil { + return "", err + } + var singular string + err := drm.checkAndReload(&meta.NoResourceMatchError{}, func() error { + var err error + singular, err = drm.staticMapper.ResourceSingularizer(resource) + return err + }) + return singular, err +} diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/client/client.go b/vendor/sigs.k8s.io/controller-runtime/pkg/client/client.go new file mode 100644 index 000000000..bbe36c467 --- /dev/null +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/client/client.go @@ -0,0 +1,328 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package client + +import ( + "context" + "fmt" + "strings" + + "k8s.io/apimachinery/pkg/api/meta" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/apimachinery/pkg/runtime/serializer" + "k8s.io/client-go/kubernetes/scheme" + "k8s.io/client-go/metadata" + "k8s.io/client-go/rest" + + "sigs.k8s.io/controller-runtime/pkg/client/apiutil" + "sigs.k8s.io/controller-runtime/pkg/log" +) + +// WarningHandlerOptions are options for configuring a +// warning handler for the client which is responsible +// for surfacing API Server warnings. +type WarningHandlerOptions struct { + // SuppressWarnings decides if the warnings from the + // API server are suppressed or surfaced in the client. + SuppressWarnings bool + // AllowDuplicateLogs does not deduplicate the to-be + // logged surfaced warnings messages. See + // log.WarningHandlerOptions for considerations + // regarding deuplication + AllowDuplicateLogs bool +} + +// Options are creation options for a Client. +type Options struct { + // Scheme, if provided, will be used to map go structs to GroupVersionKinds + Scheme *runtime.Scheme + + // Mapper, if provided, will be used to map GroupVersionKinds to Resources + Mapper meta.RESTMapper + + // Opts is used to configure the warning handler responsible for + // surfacing and handling warnings messages sent by the API server. + Opts WarningHandlerOptions +} + +// New returns a new Client using the provided config and Options. +// The returned client reads *and* writes directly from the server +// (it doesn't use object caches). It understands how to work with +// normal types (both custom resources and aggregated/built-in resources), +// as well as unstructured types. +// +// In the case of normal types, the scheme will be used to look up the +// corresponding group, version, and kind for the given type. In the +// case of unstructured types, the group, version, and kind will be extracted +// from the corresponding fields on the object. 
+func New(config *rest.Config, options Options) (Client, error) { + return newClient(config, options) +} + +func newClient(config *rest.Config, options Options) (*client, error) { + if config == nil { + return nil, fmt.Errorf("must provide non-nil rest.Config to client.New") + } + + if !options.Opts.SuppressWarnings { + // surface warnings + logger := log.Log.WithName("KubeAPIWarningLogger") + // Set a WarningHandler, the default WarningHandler + // is log.KubeAPIWarningLogger with deduplication enabled. + // See log.KubeAPIWarningLoggerOptions for considerations + // regarding deduplication. + rest.SetDefaultWarningHandler( + log.NewKubeAPIWarningLogger( + logger, + log.KubeAPIWarningLoggerOptions{ + Deduplicate: !options.Opts.AllowDuplicateLogs, + }, + ), + ) + } + + // Init a scheme if none provided + if options.Scheme == nil { + options.Scheme = scheme.Scheme + } + + // Init a Mapper if none provided + if options.Mapper == nil { + var err error + options.Mapper, err = apiutil.NewDynamicRESTMapper(config) + if err != nil { + return nil, err + } + } + + clientcache := &clientCache{ + config: config, + scheme: options.Scheme, + mapper: options.Mapper, + codecs: serializer.NewCodecFactory(options.Scheme), + + structuredResourceByType: make(map[schema.GroupVersionKind]*resourceMeta), + unstructuredResourceByType: make(map[schema.GroupVersionKind]*resourceMeta), + } + + rawMetaClient, err := metadata.NewForConfig(config) + if err != nil { + return nil, fmt.Errorf("unable to construct metadata-only client for use as part of client: %w", err) + } + + c := &client{ + typedClient: typedClient{ + cache: clientcache, + paramCodec: runtime.NewParameterCodec(options.Scheme), + }, + unstructuredClient: unstructuredClient{ + cache: clientcache, + paramCodec: noConversionParamCodec{}, + }, + metadataClient: metadataClient{ + client: rawMetaClient, + restMapper: options.Mapper, + }, + scheme: options.Scheme, + mapper: options.Mapper, + } + + return c, nil +} + +var _ Client = &client{} + +// client is a client.Client that reads and writes directly from/to an API server. It lazily initializes +// new clients at the time they are used, and caches the client. +type client struct { + typedClient typedClient + unstructuredClient unstructuredClient + metadataClient metadataClient + scheme *runtime.Scheme + mapper meta.RESTMapper +} + +// resetGroupVersionKind is a helper function to restore and preserve GroupVersionKind on an object. +func (c *client) resetGroupVersionKind(obj runtime.Object, gvk schema.GroupVersionKind) { + if gvk != schema.EmptyObjectKind.GroupVersionKind() { + if v, ok := obj.(schema.ObjectKind); ok { + v.SetGroupVersionKind(gvk) + } + } +} + +// Scheme returns the scheme this client is using. +func (c *client) Scheme() *runtime.Scheme { + return c.scheme +} + +// RESTMapper returns the scheme this client is using. +func (c *client) RESTMapper() meta.RESTMapper { + return c.mapper +} + +// Create implements client.Client. +func (c *client) Create(ctx context.Context, obj Object, opts ...CreateOption) error { + switch obj.(type) { + case *unstructured.Unstructured: + return c.unstructuredClient.Create(ctx, obj, opts...) + case *metav1.PartialObjectMetadata: + return fmt.Errorf("cannot create using only metadata") + default: + return c.typedClient.Create(ctx, obj, opts...) + } +} + +// Update implements client.Client. 
+func (c *client) Update(ctx context.Context, obj Object, opts ...UpdateOption) error { + defer c.resetGroupVersionKind(obj, obj.GetObjectKind().GroupVersionKind()) + switch obj.(type) { + case *unstructured.Unstructured: + return c.unstructuredClient.Update(ctx, obj, opts...) + case *metav1.PartialObjectMetadata: + return fmt.Errorf("cannot update using only metadata -- did you mean to patch?") + default: + return c.typedClient.Update(ctx, obj, opts...) + } +} + +// Delete implements client.Client. +func (c *client) Delete(ctx context.Context, obj Object, opts ...DeleteOption) error { + switch obj.(type) { + case *unstructured.Unstructured: + return c.unstructuredClient.Delete(ctx, obj, opts...) + case *metav1.PartialObjectMetadata: + return c.metadataClient.Delete(ctx, obj, opts...) + default: + return c.typedClient.Delete(ctx, obj, opts...) + } +} + +// DeleteAllOf implements client.Client. +func (c *client) DeleteAllOf(ctx context.Context, obj Object, opts ...DeleteAllOfOption) error { + switch obj.(type) { + case *unstructured.Unstructured: + return c.unstructuredClient.DeleteAllOf(ctx, obj, opts...) + case *metav1.PartialObjectMetadata: + return c.metadataClient.DeleteAllOf(ctx, obj, opts...) + default: + return c.typedClient.DeleteAllOf(ctx, obj, opts...) + } +} + +// Patch implements client.Client. +func (c *client) Patch(ctx context.Context, obj Object, patch Patch, opts ...PatchOption) error { + defer c.resetGroupVersionKind(obj, obj.GetObjectKind().GroupVersionKind()) + switch obj.(type) { + case *unstructured.Unstructured: + return c.unstructuredClient.Patch(ctx, obj, patch, opts...) + case *metav1.PartialObjectMetadata: + return c.metadataClient.Patch(ctx, obj, patch, opts...) + default: + return c.typedClient.Patch(ctx, obj, patch, opts...) + } +} + +// Get implements client.Client. +func (c *client) Get(ctx context.Context, key ObjectKey, obj Object) error { + switch obj.(type) { + case *unstructured.Unstructured: + return c.unstructuredClient.Get(ctx, key, obj) + case *metav1.PartialObjectMetadata: + // Metadata only object should always preserve the GVK coming in from the caller. + defer c.resetGroupVersionKind(obj, obj.GetObjectKind().GroupVersionKind()) + return c.metadataClient.Get(ctx, key, obj) + default: + return c.typedClient.Get(ctx, key, obj) + } +} + +// List implements client.Client. +func (c *client) List(ctx context.Context, obj ObjectList, opts ...ListOption) error { + switch x := obj.(type) { + case *unstructured.UnstructuredList: + return c.unstructuredClient.List(ctx, obj, opts...) + case *metav1.PartialObjectMetadataList: + // Metadata only object should always preserve the GVK. + gvk := obj.GetObjectKind().GroupVersionKind() + defer c.resetGroupVersionKind(obj, gvk) + + // Call the list client. + if err := c.metadataClient.List(ctx, obj, opts...); err != nil { + return err + } + + // Restore the GVK for each item in the list. + itemGVK := schema.GroupVersionKind{ + Group: gvk.Group, + Version: gvk.Version, + // TODO: this is producing unsafe guesses that don't actually work, + // but it matches ~99% of the cases out there. + Kind: strings.TrimSuffix(gvk.Kind, "List"), + } + for i := range x.Items { + item := &x.Items[i] + item.SetGroupVersionKind(itemGVK) + } + + return nil + default: + return c.typedClient.List(ctx, obj, opts...) + } +} + +// Status implements client.StatusClient. +func (c *client) Status() StatusWriter { + return &statusWriter{client: c} +} + +// statusWriter is client.StatusWriter that writes status subresource. 
+type statusWriter struct { + client *client +} + +// ensure statusWriter implements client.StatusWriter. +var _ StatusWriter = &statusWriter{} + +// Update implements client.StatusWriter. +func (sw *statusWriter) Update(ctx context.Context, obj Object, opts ...UpdateOption) error { + defer sw.client.resetGroupVersionKind(obj, obj.GetObjectKind().GroupVersionKind()) + switch obj.(type) { + case *unstructured.Unstructured: + return sw.client.unstructuredClient.UpdateStatus(ctx, obj, opts...) + case *metav1.PartialObjectMetadata: + return fmt.Errorf("cannot update status using only metadata -- did you mean to patch?") + default: + return sw.client.typedClient.UpdateStatus(ctx, obj, opts...) + } +} + +// Patch implements client.Client. +func (sw *statusWriter) Patch(ctx context.Context, obj Object, patch Patch, opts ...PatchOption) error { + defer sw.client.resetGroupVersionKind(obj, obj.GetObjectKind().GroupVersionKind()) + switch obj.(type) { + case *unstructured.Unstructured: + return sw.client.unstructuredClient.PatchStatus(ctx, obj, patch, opts...) + case *metav1.PartialObjectMetadata: + return sw.client.metadataClient.PatchStatus(ctx, obj, patch, opts...) + default: + return sw.client.typedClient.PatchStatus(ctx, obj, patch, opts...) + } +} diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/client/client_cache.go b/vendor/sigs.k8s.io/controller-runtime/pkg/client/client_cache.go new file mode 100644 index 000000000..857a0b38a --- /dev/null +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/client/client_cache.go @@ -0,0 +1,150 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package client + +import ( + "strings" + "sync" + + "k8s.io/apimachinery/pkg/api/meta" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/apimachinery/pkg/runtime/serializer" + "k8s.io/client-go/rest" + "sigs.k8s.io/controller-runtime/pkg/client/apiutil" +) + +// clientCache creates and caches rest clients and metadata for Kubernetes types. +type clientCache struct { + // config is the rest.Config to talk to an apiserver + config *rest.Config + + // scheme maps go structs to GroupVersionKinds + scheme *runtime.Scheme + + // mapper maps GroupVersionKinds to Resources + mapper meta.RESTMapper + + // codecs are used to create a REST client for a gvk + codecs serializer.CodecFactory + + // structuredResourceByType caches structured type metadata + structuredResourceByType map[schema.GroupVersionKind]*resourceMeta + // unstructuredResourceByType caches unstructured type metadata + unstructuredResourceByType map[schema.GroupVersionKind]*resourceMeta + mu sync.RWMutex +} + +// newResource maps obj to a Kubernetes Resource and constructs a client for that Resource. +// If the object is a list, the resource represents the item's type instead. 
+func (c *clientCache) newResource(gvk schema.GroupVersionKind, isList, isUnstructured bool) (*resourceMeta, error) { + if strings.HasSuffix(gvk.Kind, "List") && isList { + // if this was a list, treat it as a request for the item's resource + gvk.Kind = gvk.Kind[:len(gvk.Kind)-4] + } + + client, err := apiutil.RESTClientForGVK(gvk, isUnstructured, c.config, c.codecs) + if err != nil { + return nil, err + } + mapping, err := c.mapper.RESTMapping(gvk.GroupKind(), gvk.Version) + if err != nil { + return nil, err + } + return &resourceMeta{Interface: client, mapping: mapping, gvk: gvk}, nil +} + +// getResource returns the resource meta information for the given type of object. +// If the object is a list, the resource represents the item's type instead. +func (c *clientCache) getResource(obj runtime.Object) (*resourceMeta, error) { + gvk, err := apiutil.GVKForObject(obj, c.scheme) + if err != nil { + return nil, err + } + + _, isUnstructured := obj.(*unstructured.Unstructured) + _, isUnstructuredList := obj.(*unstructured.UnstructuredList) + isUnstructured = isUnstructured || isUnstructuredList + + // It's better to do creation work twice than to not let multiple + // people make requests at once + c.mu.RLock() + resourceByType := c.structuredResourceByType + if isUnstructured { + resourceByType = c.unstructuredResourceByType + } + r, known := resourceByType[gvk] + c.mu.RUnlock() + + if known { + return r, nil + } + + // Initialize a new Client + c.mu.Lock() + defer c.mu.Unlock() + r, err = c.newResource(gvk, meta.IsListType(obj), isUnstructured) + if err != nil { + return nil, err + } + resourceByType[gvk] = r + return r, err +} + +// getObjMeta returns objMeta containing both type and object metadata and state. +func (c *clientCache) getObjMeta(obj runtime.Object) (*objMeta, error) { + r, err := c.getResource(obj) + if err != nil { + return nil, err + } + m, err := meta.Accessor(obj) + if err != nil { + return nil, err + } + return &objMeta{resourceMeta: r, Object: m}, err +} + +// resourceMeta caches state for a Kubernetes type. +type resourceMeta struct { + // client is the rest client used to talk to the apiserver + rest.Interface + // gvk is the GroupVersionKind of the resourceMeta + gvk schema.GroupVersionKind + // mapping is the rest mapping + mapping *meta.RESTMapping +} + +// isNamespaced returns true if the type is namespaced. +func (r *resourceMeta) isNamespaced() bool { + return r.mapping.Scope.Name() != meta.RESTScopeNameRoot +} + +// resource returns the resource name of the type. +func (r *resourceMeta) resource() string { + return r.mapping.Resource.Resource +} + +// objMeta stores type and object information about a Kubernetes type. +type objMeta struct { + // resourceMeta contains type information for the object + *resourceMeta + + // Object contains meta data for the object instance + metav1.Object +} diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/client/codec.go b/vendor/sigs.k8s.io/controller-runtime/pkg/client/codec.go new file mode 100644 index 000000000..9c2923106 --- /dev/null +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/client/codec.go @@ -0,0 +1,40 @@ +/* +Copyright 2021 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package client + +import ( + "errors" + "net/url" + + "k8s.io/apimachinery/pkg/conversion/queryparams" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" +) + +var _ runtime.ParameterCodec = noConversionParamCodec{} + +// noConversionParamCodec is a no-conversion codec for serializing parameters into URL query strings. +// it's useful in scenarios with the unstructured client and arbitrary resources. +type noConversionParamCodec struct{} + +func (noConversionParamCodec) EncodeParameters(obj runtime.Object, to schema.GroupVersion) (url.Values, error) { + return queryparams.Convert(obj) +} + +func (noConversionParamCodec) DecodeParameters(parameters url.Values, from schema.GroupVersion, into runtime.Object) error { + return errors.New("DecodeParameters not implemented on noConversionParamCodec") +} diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/client/config/config.go b/vendor/sigs.k8s.io/controller-runtime/pkg/client/config/config.go new file mode 100644 index 000000000..da87f2bd4 --- /dev/null +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/client/config/config.go @@ -0,0 +1,157 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package config + +import ( + "flag" + "fmt" + "os" + "os/user" + "path/filepath" + + "k8s.io/client-go/rest" + "k8s.io/client-go/tools/clientcmd" + clientcmdapi "k8s.io/client-go/tools/clientcmd/api" + logf "sigs.k8s.io/controller-runtime/pkg/internal/log" +) + +var ( + kubeconfig string + log = logf.RuntimeLog.WithName("client").WithName("config") +) + +func init() { + // TODO: Fix this to allow double vendoring this library but still register flags on behalf of users + flag.StringVar(&kubeconfig, "kubeconfig", "", + "Paths to a kubeconfig. Only required if out-of-cluster.") +} + +// GetConfig creates a *rest.Config for talking to a Kubernetes API server. +// If --kubeconfig is set, will use the kubeconfig file at that location. Otherwise will assume running +// in cluster and use the cluster provided kubeconfig. +// +// It also applies saner defaults for QPS and burst based on the Kubernetes +// controller manager defaults (20 QPS, 30 burst) +// +// Config precedence +// +// * --kubeconfig flag pointing at a file +// +// * KUBECONFIG environment variable pointing at a file +// +// * In-cluster config if running in cluster +// +// * $HOME/.kube/config if exists. +func GetConfig() (*rest.Config, error) { + return GetConfigWithContext("") +} + +// GetConfigWithContext creates a *rest.Config for talking to a Kubernetes API server with a specific context. 
+// If --kubeconfig is set, will use the kubeconfig file at that location. Otherwise will assume running +// in cluster and use the cluster provided kubeconfig. +// +// It also applies saner defaults for QPS and burst based on the Kubernetes +// controller manager defaults (20 QPS, 30 burst) +// +// Config precedence +// +// * --kubeconfig flag pointing at a file +// +// * KUBECONFIG environment variable pointing at a file +// +// * In-cluster config if running in cluster +// +// * $HOME/.kube/config if exists. +func GetConfigWithContext(context string) (*rest.Config, error) { + cfg, err := loadConfig(context) + if err != nil { + return nil, err + } + + if cfg.QPS == 0.0 { + cfg.QPS = 20.0 + cfg.Burst = 30.0 + } + + return cfg, nil +} + +// loadInClusterConfig is a function used to load the in-cluster +// Kubernetes client config. This variable makes is possible to +// test the precedence of loading the config. +var loadInClusterConfig = rest.InClusterConfig + +// loadConfig loads a REST Config as per the rules specified in GetConfig. +func loadConfig(context string) (*rest.Config, error) { + // If a flag is specified with the config location, use that + if len(kubeconfig) > 0 { + return loadConfigWithContext("", &clientcmd.ClientConfigLoadingRules{ExplicitPath: kubeconfig}, context) + } + + // If the recommended kubeconfig env variable is not specified, + // try the in-cluster config. + kubeconfigPath := os.Getenv(clientcmd.RecommendedConfigPathEnvVar) + if len(kubeconfigPath) == 0 { + if c, err := loadInClusterConfig(); err == nil { + return c, nil + } + } + + // If the recommended kubeconfig env variable is set, or there + // is no in-cluster config, try the default recommended locations. + // + // NOTE: For default config file locations, upstream only checks + // $HOME for the user's home directory, but we can also try + // os/user.HomeDir when $HOME is unset. + // + // TODO(jlanford): could this be done upstream? + loadingRules := clientcmd.NewDefaultClientConfigLoadingRules() + if _, ok := os.LookupEnv("HOME"); !ok { + u, err := user.Current() + if err != nil { + return nil, fmt.Errorf("could not get current user: %v", err) + } + loadingRules.Precedence = append(loadingRules.Precedence, filepath.Join(u.HomeDir, clientcmd.RecommendedHomeDir, clientcmd.RecommendedFileName)) + } + + return loadConfigWithContext("", loadingRules, context) +} + +func loadConfigWithContext(apiServerURL string, loader clientcmd.ClientConfigLoader, context string) (*rest.Config, error) { + return clientcmd.NewNonInteractiveDeferredLoadingClientConfig( + loader, + &clientcmd.ConfigOverrides{ + ClusterInfo: clientcmdapi.Cluster{ + Server: apiServerURL, + }, + CurrentContext: context, + }).ClientConfig() +} + +// GetConfigOrDie creates a *rest.Config for talking to a Kubernetes apiserver. +// If --kubeconfig is set, will use the kubeconfig file at that location. Otherwise will assume running +// in cluster and use the cluster provided kubeconfig. +// +// Will log an error and exit if there is an error creating the rest.Config. 
+func GetConfigOrDie() *rest.Config { + config, err := GetConfig() + if err != nil { + log.Error(err, "unable to get kubeconfig") + os.Exit(1) + } + return config +} diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/client/config/doc.go b/vendor/sigs.k8s.io/controller-runtime/pkg/client/config/doc.go new file mode 100644 index 000000000..796c9cf59 --- /dev/null +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/client/config/doc.go @@ -0,0 +1,18 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Package config contains libraries for initializing REST configs for talking to the Kubernetes API +package config diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/client/doc.go b/vendor/sigs.k8s.io/controller-runtime/pkg/client/doc.go new file mode 100644 index 000000000..2965e5fa9 --- /dev/null +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/client/doc.go @@ -0,0 +1,49 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Package client contains functionality for interacting with Kubernetes API +// servers. +// +// Clients +// +// Clients are split into two interfaces -- Readers and Writers. Readers +// get and list, while writers create, update, and delete. +// +// The New function can be used to create a new client that talks directly +// to the API server. +// +// A common pattern in Kubernetes to read from a cache and write to the API +// server. This pattern is covered by the DelegatingClient type, which can +// be used to have a client whose Reader is different from the Writer. +// +// Options +// +// Many client operations in Kubernetes support options. These options are +// represented as variadic arguments at the end of a given method call. +// For instance, to use a label selector on list, you can call +// err := someReader.List(context.Background(), &podList, client.MatchingLabels{"somelabel": "someval"}) +// +// Indexing +// +// Indexes may be added to caches using a FieldIndexer. This allows you to easily +// and efficiently look up objects with certain properties. You can then make +// use of the index by specifying a field selector on calls to List on the Reader +// corresponding to the given Cache. +// +// For instance, a Secret controller might have an index on the +// `.spec.volumes.secret.secretName` field in Pod objects, so that it could +// easily look up all pods that reference a given secret. 
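
As a minimal sketch of how the vendored config and client packages documented above are typically consumed together (this is illustration only, not part of the patch; the `client.New` constructor is provided elsewhere in the vendored package, and the `default` namespace and `app: example` label are assumptions for the example):

```go
package main

import (
	"context"
	"fmt"

	corev1 "k8s.io/api/core/v1"
	"sigs.k8s.io/controller-runtime/pkg/client"
	"sigs.k8s.io/controller-runtime/pkg/client/config"
)

func main() {
	// Build a *rest.Config using the precedence described above:
	// --kubeconfig flag, KUBECONFIG, in-cluster config, $HOME/.kube/config.
	cfg, err := config.GetConfig()
	if err != nil {
		panic(err)
	}

	// client.New returns a client that talks directly to the API server.
	c, err := client.New(cfg, client.Options{})
	if err != nil {
		panic(err)
	}

	// Readers list and get; variadic options such as InNamespace and
	// MatchingLabels narrow the request, as described in the package doc.
	var pods corev1.PodList
	err = c.List(context.Background(), &pods,
		client.InNamespace("default"),
		client.MatchingLabels{"app": "example"},
	)
	if err != nil {
		panic(err)
	}
	fmt.Println("pods found:", len(pods.Items))
}
```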
+package client diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/client/dryrun.go b/vendor/sigs.k8s.io/controller-runtime/pkg/client/dryrun.go new file mode 100644 index 000000000..ea25ea253 --- /dev/null +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/client/dryrun.go @@ -0,0 +1,106 @@ +/* +Copyright 2020 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package client + +import ( + "context" + + "k8s.io/apimachinery/pkg/api/meta" + "k8s.io/apimachinery/pkg/runtime" +) + +// NewDryRunClient wraps an existing client and enforces DryRun mode +// on all mutating api calls. +func NewDryRunClient(c Client) Client { + return &dryRunClient{client: c} +} + +var _ Client = &dryRunClient{} + +// dryRunClient is a Client that wraps another Client in order to enforce DryRun mode. +type dryRunClient struct { + client Client +} + +// Scheme returns the scheme this client is using. +func (c *dryRunClient) Scheme() *runtime.Scheme { + return c.client.Scheme() +} + +// RESTMapper returns the rest mapper this client is using. +func (c *dryRunClient) RESTMapper() meta.RESTMapper { + return c.client.RESTMapper() +} + +// Create implements client.Client. +func (c *dryRunClient) Create(ctx context.Context, obj Object, opts ...CreateOption) error { + return c.client.Create(ctx, obj, append(opts, DryRunAll)...) +} + +// Update implements client.Client. +func (c *dryRunClient) Update(ctx context.Context, obj Object, opts ...UpdateOption) error { + return c.client.Update(ctx, obj, append(opts, DryRunAll)...) +} + +// Delete implements client.Client. +func (c *dryRunClient) Delete(ctx context.Context, obj Object, opts ...DeleteOption) error { + return c.client.Delete(ctx, obj, append(opts, DryRunAll)...) +} + +// DeleteAllOf implements client.Client. +func (c *dryRunClient) DeleteAllOf(ctx context.Context, obj Object, opts ...DeleteAllOfOption) error { + return c.client.DeleteAllOf(ctx, obj, append(opts, DryRunAll)...) +} + +// Patch implements client.Client. +func (c *dryRunClient) Patch(ctx context.Context, obj Object, patch Patch, opts ...PatchOption) error { + return c.client.Patch(ctx, obj, patch, append(opts, DryRunAll)...) +} + +// Get implements client.Client. +func (c *dryRunClient) Get(ctx context.Context, key ObjectKey, obj Object) error { + return c.client.Get(ctx, key, obj) +} + +// List implements client.Client. +func (c *dryRunClient) List(ctx context.Context, obj ObjectList, opts ...ListOption) error { + return c.client.List(ctx, obj, opts...) +} + +// Status implements client.StatusClient. +func (c *dryRunClient) Status() StatusWriter { + return &dryRunStatusWriter{client: c.client.Status()} +} + +// ensure dryRunStatusWriter implements client.StatusWriter. +var _ StatusWriter = &dryRunStatusWriter{} + +// dryRunStatusWriter is client.StatusWriter that writes status subresource with dryRun mode +// enforced. +type dryRunStatusWriter struct { + client StatusWriter +} + +// Update implements client.StatusWriter. 
+func (sw *dryRunStatusWriter) Update(ctx context.Context, obj Object, opts ...UpdateOption) error { + return sw.client.Update(ctx, obj, append(opts, DryRunAll)...) +} + +// Patch implements client.StatusWriter. +func (sw *dryRunStatusWriter) Patch(ctx context.Context, obj Object, patch Patch, opts ...PatchOption) error { + return sw.client.Patch(ctx, obj, patch, append(opts, DryRunAll)...) +} diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/client/interfaces.go b/vendor/sigs.k8s.io/controller-runtime/pkg/client/interfaces.go new file mode 100644 index 000000000..58c2ece15 --- /dev/null +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/client/interfaces.go @@ -0,0 +1,145 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package client + +import ( + "context" + + apierrors "k8s.io/apimachinery/pkg/api/errors" + + "k8s.io/apimachinery/pkg/api/meta" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/types" + "k8s.io/apimachinery/pkg/watch" +) + +// ObjectKey identifies a Kubernetes Object. +type ObjectKey = types.NamespacedName + +// ObjectKeyFromObject returns the ObjectKey given a runtime.Object. +func ObjectKeyFromObject(obj Object) ObjectKey { + return ObjectKey{Namespace: obj.GetNamespace(), Name: obj.GetName()} +} + +// Patch is a patch that can be applied to a Kubernetes object. +type Patch interface { + // Type is the PatchType of the patch. + Type() types.PatchType + // Data is the raw data representing the patch. + Data(obj Object) ([]byte, error) +} + +// TODO(directxman12): is there a sane way to deal with get/delete options? + +// Reader knows how to read and list Kubernetes objects. +type Reader interface { + // Get retrieves an obj for the given object key from the Kubernetes Cluster. + // obj must be a struct pointer so that obj can be updated with the response + // returned by the Server. + Get(ctx context.Context, key ObjectKey, obj Object) error + + // List retrieves list of objects for a given namespace and list options. On a + // successful call, Items field in the list will be populated with the + // result returned from the server. + List(ctx context.Context, list ObjectList, opts ...ListOption) error +} + +// Writer knows how to create, delete, and update Kubernetes objects. +type Writer interface { + // Create saves the object obj in the Kubernetes cluster. + Create(ctx context.Context, obj Object, opts ...CreateOption) error + + // Delete deletes the given obj from Kubernetes cluster. + Delete(ctx context.Context, obj Object, opts ...DeleteOption) error + + // Update updates the given obj in the Kubernetes cluster. obj must be a + // struct pointer so that obj can be updated with the content returned by the Server. + Update(ctx context.Context, obj Object, opts ...UpdateOption) error + + // Patch patches the given obj in the Kubernetes cluster. obj must be a + // struct pointer so that obj can be updated with the content returned by the Server. 
+ Patch(ctx context.Context, obj Object, patch Patch, opts ...PatchOption) error + + // DeleteAllOf deletes all objects of the given type matching the given options. + DeleteAllOf(ctx context.Context, obj Object, opts ...DeleteAllOfOption) error +} + +// StatusClient knows how to create a client which can update status subresource +// for kubernetes objects. +type StatusClient interface { + Status() StatusWriter +} + +// StatusWriter knows how to update status subresource of a Kubernetes object. +type StatusWriter interface { + // Update updates the fields corresponding to the status subresource for the + // given obj. obj must be a struct pointer so that obj can be updated + // with the content returned by the Server. + Update(ctx context.Context, obj Object, opts ...UpdateOption) error + + // Patch patches the given object's subresource. obj must be a struct + // pointer so that obj can be updated with the content returned by the + // Server. + Patch(ctx context.Context, obj Object, patch Patch, opts ...PatchOption) error +} + +// Client knows how to perform CRUD operations on Kubernetes objects. +type Client interface { + Reader + Writer + StatusClient + + // Scheme returns the scheme this client is using. + Scheme() *runtime.Scheme + // RESTMapper returns the rest this client is using. + RESTMapper() meta.RESTMapper +} + +// WithWatch supports Watch on top of the CRUD operations supported by +// the normal Client. Its intended use-case are CLI apps that need to wait for +// events. +type WithWatch interface { + Client + Watch(ctx context.Context, obj ObjectList, opts ...ListOption) (watch.Interface, error) +} + +// IndexerFunc knows how to take an object and turn it into a series +// of non-namespaced keys. Namespaced objects are automatically given +// namespaced and non-spaced variants, so keys do not need to include namespace. +type IndexerFunc func(Object) []string + +// FieldIndexer knows how to index over a particular "field" such that it +// can later be used by a field selector. +type FieldIndexer interface { + // IndexFields adds an index with the given field name on the given object type + // by using the given function to extract the value for that field. If you want + // compatibility with the Kubernetes API server, only return one key, and only use + // fields that the API server supports. Otherwise, you can return multiple keys, + // and "equality" in the field selector means that at least one key matches the value. + // The FieldIndexer will automatically take care of indexing over namespace + // and supporting efficient all-namespace queries. + IndexField(ctx context.Context, obj Object, field string, extractValue IndexerFunc) error +} + +// IgnoreNotFound returns nil on NotFound errors. +// All other values that are not NotFound errors or nil are returned unmodified. +func IgnoreNotFound(err error) error { + if apierrors.IsNotFound(err) { + return nil + } + return err +} diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/client/metadata_client.go b/vendor/sigs.k8s.io/controller-runtime/pkg/client/metadata_client.go new file mode 100644 index 000000000..59747463a --- /dev/null +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/client/metadata_client.go @@ -0,0 +1,195 @@ +/* +Copyright 2020 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package client + +import ( + "context" + "fmt" + "strings" + + "k8s.io/apimachinery/pkg/api/meta" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/client-go/metadata" +) + +// TODO(directxman12): we could rewrite this on top of the low-level REST +// client to avoid the extra shallow copy at the end, but I'm not sure it's +// worth it -- the metadata client deals with falling back to loading the whole +// object on older API servers, etc, and we'd have to reproduce that. + +// metadataClient is a client that reads & writes metadata-only requests to/from the API server. +type metadataClient struct { + client metadata.Interface + restMapper meta.RESTMapper +} + +func (mc *metadataClient) getResourceInterface(gvk schema.GroupVersionKind, ns string) (metadata.ResourceInterface, error) { + mapping, err := mc.restMapper.RESTMapping(gvk.GroupKind(), gvk.Version) + if err != nil { + return nil, err + } + if mapping.Scope.Name() == meta.RESTScopeNameRoot { + return mc.client.Resource(mapping.Resource), nil + } + return mc.client.Resource(mapping.Resource).Namespace(ns), nil +} + +// Delete implements client.Client. +func (mc *metadataClient) Delete(ctx context.Context, obj Object, opts ...DeleteOption) error { + metadata, ok := obj.(*metav1.PartialObjectMetadata) + if !ok { + return fmt.Errorf("metadata client did not understand object: %T", obj) + } + + resInt, err := mc.getResourceInterface(metadata.GroupVersionKind(), metadata.Namespace) + if err != nil { + return err + } + + deleteOpts := DeleteOptions{} + deleteOpts.ApplyOptions(opts) + + return resInt.Delete(ctx, metadata.Name, *deleteOpts.AsDeleteOptions()) +} + +// DeleteAllOf implements client.Client. +func (mc *metadataClient) DeleteAllOf(ctx context.Context, obj Object, opts ...DeleteAllOfOption) error { + metadata, ok := obj.(*metav1.PartialObjectMetadata) + if !ok { + return fmt.Errorf("metadata client did not understand object: %T", obj) + } + + deleteAllOfOpts := DeleteAllOfOptions{} + deleteAllOfOpts.ApplyOptions(opts) + + resInt, err := mc.getResourceInterface(metadata.GroupVersionKind(), deleteAllOfOpts.ListOptions.Namespace) + if err != nil { + return err + } + + return resInt.DeleteCollection(ctx, *deleteAllOfOpts.AsDeleteOptions(), *deleteAllOfOpts.AsListOptions()) +} + +// Patch implements client.Client. 
+func (mc *metadataClient) Patch(ctx context.Context, obj Object, patch Patch, opts ...PatchOption) error { + metadata, ok := obj.(*metav1.PartialObjectMetadata) + if !ok { + return fmt.Errorf("metadata client did not understand object: %T", obj) + } + + gvk := metadata.GroupVersionKind() + resInt, err := mc.getResourceInterface(gvk, metadata.Namespace) + if err != nil { + return err + } + + data, err := patch.Data(obj) + if err != nil { + return err + } + + patchOpts := &PatchOptions{} + patchOpts.ApplyOptions(opts) + + res, err := resInt.Patch(ctx, metadata.Name, patch.Type(), data, *patchOpts.AsPatchOptions()) + if err != nil { + return err + } + *metadata = *res + metadata.SetGroupVersionKind(gvk) // restore the GVK, which isn't set on metadata + return nil +} + +// Get implements client.Client. +func (mc *metadataClient) Get(ctx context.Context, key ObjectKey, obj Object) error { + metadata, ok := obj.(*metav1.PartialObjectMetadata) + if !ok { + return fmt.Errorf("metadata client did not understand object: %T", obj) + } + + gvk := metadata.GroupVersionKind() + + resInt, err := mc.getResourceInterface(gvk, key.Namespace) + if err != nil { + return err + } + + res, err := resInt.Get(ctx, key.Name, metav1.GetOptions{}) + if err != nil { + return err + } + *metadata = *res + metadata.SetGroupVersionKind(gvk) // restore the GVK, which isn't set on metadata + return nil +} + +// List implements client.Client. +func (mc *metadataClient) List(ctx context.Context, obj ObjectList, opts ...ListOption) error { + metadata, ok := obj.(*metav1.PartialObjectMetadataList) + if !ok { + return fmt.Errorf("metadata client did not understand object: %T", obj) + } + + gvk := metadata.GroupVersionKind() + if strings.HasSuffix(gvk.Kind, "List") { + gvk.Kind = gvk.Kind[:len(gvk.Kind)-4] + } + + listOpts := ListOptions{} + listOpts.ApplyOptions(opts) + + resInt, err := mc.getResourceInterface(gvk, listOpts.Namespace) + if err != nil { + return err + } + + res, err := resInt.List(ctx, *listOpts.AsListOptions()) + if err != nil { + return err + } + *metadata = *res + metadata.SetGroupVersionKind(gvk) // restore the GVK, which isn't set on metadata + return nil +} + +func (mc *metadataClient) PatchStatus(ctx context.Context, obj Object, patch Patch, opts ...PatchOption) error { + metadata, ok := obj.(*metav1.PartialObjectMetadata) + if !ok { + return fmt.Errorf("metadata client did not understand object: %T", obj) + } + + gvk := metadata.GroupVersionKind() + resInt, err := mc.getResourceInterface(gvk, metadata.Namespace) + if err != nil { + return err + } + + data, err := patch.Data(obj) + if err != nil { + return err + } + + patchOpts := &PatchOptions{} + res, err := resInt.Patch(ctx, metadata.Name, patch.Type(), data, *patchOpts.AsPatchOptions(), "status") + if err != nil { + return err + } + *metadata = *res + metadata.SetGroupVersionKind(gvk) // restore the GVK, which isn't set on metadata + return nil +} diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/client/namespaced_client.go b/vendor/sigs.k8s.io/controller-runtime/pkg/client/namespaced_client.go new file mode 100644 index 000000000..557598727 --- /dev/null +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/client/namespaced_client.go @@ -0,0 +1,213 @@ +/* +Copyright 2020 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package client + +import ( + "context" + "fmt" + + "k8s.io/apimachinery/pkg/api/meta" + "k8s.io/apimachinery/pkg/runtime" + "sigs.k8s.io/controller-runtime/pkg/internal/objectutil" +) + +// NewNamespacedClient wraps an existing client enforcing the namespace value. +// All functions using this client will have the same namespace declared here. +func NewNamespacedClient(c Client, ns string) Client { + return &namespacedClient{ + client: c, + namespace: ns, + } +} + +var _ Client = &namespacedClient{} + +// namespacedClient is a Client that wraps another Client in order to enforce the specified namespace value. +type namespacedClient struct { + namespace string + client Client +} + +// Scheme returns the scheme this client is using. +func (n *namespacedClient) Scheme() *runtime.Scheme { + return n.client.Scheme() +} + +// RESTMapper returns the scheme this client is using. +func (n *namespacedClient) RESTMapper() meta.RESTMapper { + return n.client.RESTMapper() +} + +// Create implements clinet.Client. +func (n *namespacedClient) Create(ctx context.Context, obj Object, opts ...CreateOption) error { + isNamespaceScoped, err := objectutil.IsAPINamespaced(obj, n.Scheme(), n.RESTMapper()) + if err != nil { + return fmt.Errorf("error finding the scope of the object: %v", err) + } + + objectNamespace := obj.GetNamespace() + if objectNamespace != n.namespace && objectNamespace != "" { + return fmt.Errorf("namespace %s of the object %s does not match the namespace %s on the client", objectNamespace, obj.GetName(), n.namespace) + } + + if isNamespaceScoped && objectNamespace == "" { + obj.SetNamespace(n.namespace) + } + return n.client.Create(ctx, obj, opts...) +} + +// Update implements client.Client. +func (n *namespacedClient) Update(ctx context.Context, obj Object, opts ...UpdateOption) error { + isNamespaceScoped, err := objectutil.IsAPINamespaced(obj, n.Scheme(), n.RESTMapper()) + if err != nil { + return fmt.Errorf("error finding the scope of the object: %v", err) + } + + objectNamespace := obj.GetNamespace() + if objectNamespace != n.namespace && objectNamespace != "" { + return fmt.Errorf("namespace %s of the object %s does not match the namespace %s on the client", objectNamespace, obj.GetName(), n.namespace) + } + + if isNamespaceScoped && objectNamespace == "" { + obj.SetNamespace(n.namespace) + } + return n.client.Update(ctx, obj, opts...) +} + +// Delete implements client.Client. +func (n *namespacedClient) Delete(ctx context.Context, obj Object, opts ...DeleteOption) error { + isNamespaceScoped, err := objectutil.IsAPINamespaced(obj, n.Scheme(), n.RESTMapper()) + if err != nil { + return fmt.Errorf("error finding the scope of the object: %v", err) + } + + objectNamespace := obj.GetNamespace() + if objectNamespace != n.namespace && objectNamespace != "" { + return fmt.Errorf("namespace %s of the object %s does not match the namespace %s on the client", objectNamespace, obj.GetName(), n.namespace) + } + + if isNamespaceScoped && objectNamespace == "" { + obj.SetNamespace(n.namespace) + } + return n.client.Delete(ctx, obj, opts...) +} + +// DeleteAllOf implements client.Client. 
+func (n *namespacedClient) DeleteAllOf(ctx context.Context, obj Object, opts ...DeleteAllOfOption) error { + isNamespaceScoped, err := objectutil.IsAPINamespaced(obj, n.Scheme(), n.RESTMapper()) + if err != nil { + return fmt.Errorf("error finding the scope of the object: %v", err) + } + + if isNamespaceScoped { + opts = append(opts, InNamespace(n.namespace)) + } + return n.client.DeleteAllOf(ctx, obj, opts...) +} + +// Patch implements client.Client. +func (n *namespacedClient) Patch(ctx context.Context, obj Object, patch Patch, opts ...PatchOption) error { + isNamespaceScoped, err := objectutil.IsAPINamespaced(obj, n.Scheme(), n.RESTMapper()) + if err != nil { + return fmt.Errorf("error finding the scope of the object: %v", err) + } + + objectNamespace := obj.GetNamespace() + if objectNamespace != n.namespace && objectNamespace != "" { + return fmt.Errorf("namespace %s of the object %s does not match the namespace %s on the client", objectNamespace, obj.GetName(), n.namespace) + } + + if isNamespaceScoped && objectNamespace == "" { + obj.SetNamespace(n.namespace) + } + return n.client.Patch(ctx, obj, patch, opts...) +} + +// Get implements client.Client. +func (n *namespacedClient) Get(ctx context.Context, key ObjectKey, obj Object) error { + isNamespaceScoped, err := objectutil.IsAPINamespaced(obj, n.Scheme(), n.RESTMapper()) + if err != nil { + return fmt.Errorf("error finding the scope of the object: %v", err) + } + if isNamespaceScoped { + if key.Namespace != "" && key.Namespace != n.namespace { + return fmt.Errorf("namespace %s provided for the object %s does not match the namesapce %s on the client", key.Namespace, obj.GetName(), n.namespace) + } + key.Namespace = n.namespace + } + return n.client.Get(ctx, key, obj) +} + +// List implements client.Client. +func (n *namespacedClient) List(ctx context.Context, obj ObjectList, opts ...ListOption) error { + if n.namespace != "" { + opts = append(opts, InNamespace(n.namespace)) + } + return n.client.List(ctx, obj, opts...) +} + +// Status implements client.StatusClient. +func (n *namespacedClient) Status() StatusWriter { + return &namespacedClientStatusWriter{StatusClient: n.client.Status(), namespace: n.namespace, namespacedclient: n} +} + +// ensure namespacedClientStatusWriter implements client.StatusWriter. +var _ StatusWriter = &namespacedClientStatusWriter{} + +type namespacedClientStatusWriter struct { + StatusClient StatusWriter + namespace string + namespacedclient Client +} + +// Update implements client.StatusWriter. +func (nsw *namespacedClientStatusWriter) Update(ctx context.Context, obj Object, opts ...UpdateOption) error { + isNamespaceScoped, err := objectutil.IsAPINamespaced(obj, nsw.namespacedclient.Scheme(), nsw.namespacedclient.RESTMapper()) + + if err != nil { + return fmt.Errorf("error finding the scope of the object: %v", err) + } + + objectNamespace := obj.GetNamespace() + if objectNamespace != nsw.namespace && objectNamespace != "" { + return fmt.Errorf("namespace %s of the object %s does not match the namespace %s on the client", objectNamespace, obj.GetName(), nsw.namespace) + } + + if isNamespaceScoped && objectNamespace == "" { + obj.SetNamespace(nsw.namespace) + } + return nsw.StatusClient.Update(ctx, obj, opts...) +} + +// Patch implements client.StatusWriter. 
+func (nsw *namespacedClientStatusWriter) Patch(ctx context.Context, obj Object, patch Patch, opts ...PatchOption) error { + isNamespaceScoped, err := objectutil.IsAPINamespaced(obj, nsw.namespacedclient.Scheme(), nsw.namespacedclient.RESTMapper()) + + if err != nil { + return fmt.Errorf("error finding the scope of the object: %v", err) + } + + objectNamespace := obj.GetNamespace() + if objectNamespace != nsw.namespace && objectNamespace != "" { + return fmt.Errorf("namespace %s of the object %s does not match the namespace %s on the client", objectNamespace, obj.GetName(), nsw.namespace) + } + + if isNamespaceScoped && objectNamespace == "" { + obj.SetNamespace(nsw.namespace) + } + return nsw.StatusClient.Patch(ctx, obj, patch, opts...) +} diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/client/object.go b/vendor/sigs.k8s.io/controller-runtime/pkg/client/object.go new file mode 100644 index 000000000..31e334d6c --- /dev/null +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/client/object.go @@ -0,0 +1,77 @@ +/* +Copyright 2020 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package client + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" +) + +// Object is a Kubernetes object, allows functions to work indistinctly with +// any resource that implements both Object interfaces. +// +// Semantically, these are objects which are both serializable (runtime.Object) +// and identifiable (metav1.Object) -- think any object which you could write +// as YAML or JSON, and then `kubectl create`. +// +// Code-wise, this means that any object which embeds both ObjectMeta (which +// provides metav1.Object) and TypeMeta (which provides half of runtime.Object) +// and has a `DeepCopyObject` implementation (the other half of runtime.Object) +// will implement this by default. +// +// For example, nearly all the built-in types are Objects, as well as all +// KubeBuilder-generated CRDs (unless you do something real funky to them). +// +// By and large, most things that implement runtime.Object also implement +// Object -- it's very rare to have *just* a runtime.Object implementation (the +// cases tend to be funky built-in types like Webhook payloads that don't have +// a `metadata` field). +// +// Notice that XYZList types are distinct: they implement ObjectList instead. +type Object interface { + metav1.Object + runtime.Object +} + +// ObjectList is a Kubernetes object list, allows functions to work +// indistinctly with any resource that implements both runtime.Object and +// metav1.ListInterface interfaces. +// +// Semantically, this is any object which may be serialized (ObjectMeta), and +// is a kubernetes list wrapper (has items, pagination fields, etc) -- think +// the wrapper used in a response from a `kubectl list --output yaml` call. 
+// +// Code-wise, this means that any object which embedds both ListMeta (which +// provides metav1.ListInterface) and TypeMeta (which provides half of +// runtime.Object) and has a `DeepCopyObject` implementation (the other half of +// runtime.Object) will implement this by default. +// +// For example, nearly all the built-in XYZList types are ObjectLists, as well +// as the XYZList types for all KubeBuilder-generated CRDs (unless you do +// something real funky to them). +// +// By and large, most things that are XYZList and implement runtime.Object also +// implement ObjectList -- it's very rare to have *just* a runtime.Object +// implementation (the cases tend to be funky built-in types like Webhook +// payloads that don't have a `metadata` field). +// +// This is similar to Object, which is almost always implemented by the items +// in the list themselves. +type ObjectList interface { + metav1.ListInterface + runtime.Object +} diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/client/options.go b/vendor/sigs.k8s.io/controller-runtime/pkg/client/options.go new file mode 100644 index 000000000..aa2299eac --- /dev/null +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/client/options.go @@ -0,0 +1,697 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package client + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/fields" + "k8s.io/apimachinery/pkg/labels" + "k8s.io/apimachinery/pkg/selection" +) + +// {{{ "Functional" Option Interfaces + +// CreateOption is some configuration that modifies options for a create request. +type CreateOption interface { + // ApplyToCreate applies this configuration to the given create options. + ApplyToCreate(*CreateOptions) +} + +// DeleteOption is some configuration that modifies options for a delete request. +type DeleteOption interface { + // ApplyToDelete applies this configuration to the given delete options. + ApplyToDelete(*DeleteOptions) +} + +// ListOption is some configuration that modifies options for a list request. +type ListOption interface { + // ApplyToList applies this configuration to the given list options. + ApplyToList(*ListOptions) +} + +// UpdateOption is some configuration that modifies options for a update request. +type UpdateOption interface { + // ApplyToUpdate applies this configuration to the given update options. + ApplyToUpdate(*UpdateOptions) +} + +// PatchOption is some configuration that modifies options for a patch request. +type PatchOption interface { + // ApplyToPatch applies this configuration to the given patch options. + ApplyToPatch(*PatchOptions) +} + +// DeleteAllOfOption is some configuration that modifies options for a delete request. +type DeleteAllOfOption interface { + // ApplyToDeleteAllOf applies this configuration to the given deletecollection options. 
+ ApplyToDeleteAllOf(*DeleteAllOfOptions) +} + +// }}} + +// {{{ Multi-Type Options + +// DryRunAll sets the "dry run" option to "all", executing all +// validation, etc without persisting the change to storage. +var DryRunAll = dryRunAll{} + +type dryRunAll struct{} + +// ApplyToCreate applies this configuration to the given create options. +func (dryRunAll) ApplyToCreate(opts *CreateOptions) { + opts.DryRun = []string{metav1.DryRunAll} +} + +// ApplyToUpdate applies this configuration to the given update options. +func (dryRunAll) ApplyToUpdate(opts *UpdateOptions) { + opts.DryRun = []string{metav1.DryRunAll} +} + +// ApplyToPatch applies this configuration to the given patch options. +func (dryRunAll) ApplyToPatch(opts *PatchOptions) { + opts.DryRun = []string{metav1.DryRunAll} +} + +// ApplyToPatch applies this configuration to the given delete options. +func (dryRunAll) ApplyToDelete(opts *DeleteOptions) { + opts.DryRun = []string{metav1.DryRunAll} +} +func (dryRunAll) ApplyToDeleteAllOf(opts *DeleteAllOfOptions) { + opts.DryRun = []string{metav1.DryRunAll} +} + +// FieldOwner set the field manager name for the given server-side apply patch. +type FieldOwner string + +// ApplyToPatch applies this configuration to the given patch options. +func (f FieldOwner) ApplyToPatch(opts *PatchOptions) { + opts.FieldManager = string(f) +} + +// ApplyToCreate applies this configuration to the given create options. +func (f FieldOwner) ApplyToCreate(opts *CreateOptions) { + opts.FieldManager = string(f) +} + +// ApplyToUpdate applies this configuration to the given update options. +func (f FieldOwner) ApplyToUpdate(opts *UpdateOptions) { + opts.FieldManager = string(f) +} + +// }}} + +// {{{ Create Options + +// CreateOptions contains options for create requests. It's generally a subset +// of metav1.CreateOptions. +type CreateOptions struct { + // When present, indicates that modifications should not be + // persisted. An invalid or unrecognized dryRun directive will + // result in an error response and no further processing of the + // request. Valid values are: + // - All: all dry run stages will be processed + DryRun []string + + // FieldManager is the name of the user or component submitting + // this request. It must be set with server-side apply. + FieldManager string + + // Raw represents raw CreateOptions, as passed to the API server. + Raw *metav1.CreateOptions +} + +// AsCreateOptions returns these options as a metav1.CreateOptions. +// This may mutate the Raw field. +func (o *CreateOptions) AsCreateOptions() *metav1.CreateOptions { + if o == nil { + return &metav1.CreateOptions{} + } + if o.Raw == nil { + o.Raw = &metav1.CreateOptions{} + } + + o.Raw.DryRun = o.DryRun + o.Raw.FieldManager = o.FieldManager + return o.Raw +} + +// ApplyOptions applies the given create options on these options, +// and then returns itself (for convenient chaining). +func (o *CreateOptions) ApplyOptions(opts []CreateOption) *CreateOptions { + for _, opt := range opts { + opt.ApplyToCreate(o) + } + return o +} + +// ApplyToCreate implements CreateOption. +func (o *CreateOptions) ApplyToCreate(co *CreateOptions) { + if o.DryRun != nil { + co.DryRun = o.DryRun + } + if o.FieldManager != "" { + co.FieldManager = o.FieldManager + } + if o.Raw != nil { + co.Raw = o.Raw + } +} + +var _ CreateOption = &CreateOptions{} + +// }}} + +// {{{ Delete Options + +// DeleteOptions contains options for delete requests. It's generally a subset +// of metav1.DeleteOptions. 
+type DeleteOptions struct { + // GracePeriodSeconds is the duration in seconds before the object should be + // deleted. Value must be non-negative integer. The value zero indicates + // delete immediately. If this value is nil, the default grace period for the + // specified type will be used. + GracePeriodSeconds *int64 + + // Preconditions must be fulfilled before a deletion is carried out. If not + // possible, a 409 Conflict status will be returned. + Preconditions *metav1.Preconditions + + // PropagationPolicy determined whether and how garbage collection will be + // performed. Either this field or OrphanDependents may be set, but not both. + // The default policy is decided by the existing finalizer set in the + // metadata.finalizers and the resource-specific default policy. + // Acceptable values are: 'Orphan' - orphan the dependents; 'Background' - + // allow the garbage collector to delete the dependents in the background; + // 'Foreground' - a cascading policy that deletes all dependents in the + // foreground. + PropagationPolicy *metav1.DeletionPropagation + + // Raw represents raw DeleteOptions, as passed to the API server. + Raw *metav1.DeleteOptions + + // When present, indicates that modifications should not be + // persisted. An invalid or unrecognized dryRun directive will + // result in an error response and no further processing of the + // request. Valid values are: + // - All: all dry run stages will be processed + DryRun []string +} + +// AsDeleteOptions returns these options as a metav1.DeleteOptions. +// This may mutate the Raw field. +func (o *DeleteOptions) AsDeleteOptions() *metav1.DeleteOptions { + if o == nil { + return &metav1.DeleteOptions{} + } + if o.Raw == nil { + o.Raw = &metav1.DeleteOptions{} + } + + o.Raw.GracePeriodSeconds = o.GracePeriodSeconds + o.Raw.Preconditions = o.Preconditions + o.Raw.PropagationPolicy = o.PropagationPolicy + o.Raw.DryRun = o.DryRun + return o.Raw +} + +// ApplyOptions applies the given delete options on these options, +// and then returns itself (for convenient chaining). +func (o *DeleteOptions) ApplyOptions(opts []DeleteOption) *DeleteOptions { + for _, opt := range opts { + opt.ApplyToDelete(o) + } + return o +} + +var _ DeleteOption = &DeleteOptions{} + +// ApplyToDelete implements DeleteOption. +func (o *DeleteOptions) ApplyToDelete(do *DeleteOptions) { + if o.GracePeriodSeconds != nil { + do.GracePeriodSeconds = o.GracePeriodSeconds + } + if o.Preconditions != nil { + do.Preconditions = o.Preconditions + } + if o.PropagationPolicy != nil { + do.PropagationPolicy = o.PropagationPolicy + } + if o.Raw != nil { + do.Raw = o.Raw + } + if o.DryRun != nil { + do.DryRun = o.DryRun + } +} + +// GracePeriodSeconds sets the grace period for the deletion +// to the given number of seconds. +type GracePeriodSeconds int64 + +// ApplyToDelete applies this configuration to the given delete options. +func (s GracePeriodSeconds) ApplyToDelete(opts *DeleteOptions) { + secs := int64(s) + opts.GracePeriodSeconds = &secs +} + +// ApplyToDeleteAllOf applies this configuration to the given an List options. +func (s GracePeriodSeconds) ApplyToDeleteAllOf(opts *DeleteAllOfOptions) { + s.ApplyToDelete(&opts.DeleteOptions) +} + +// Preconditions must be fulfilled before an operation (update, delete, etc.) is carried out. +type Preconditions metav1.Preconditions + +// ApplyToDelete applies this configuration to the given delete options. 
+func (p Preconditions) ApplyToDelete(opts *DeleteOptions) { + preconds := metav1.Preconditions(p) + opts.Preconditions = &preconds +} + +// ApplyToDeleteAllOf applies this configuration to the given an List options. +func (p Preconditions) ApplyToDeleteAllOf(opts *DeleteAllOfOptions) { + p.ApplyToDelete(&opts.DeleteOptions) +} + +// PropagationPolicy determined whether and how garbage collection will be +// performed. Either this field or OrphanDependents may be set, but not both. +// The default policy is decided by the existing finalizer set in the +// metadata.finalizers and the resource-specific default policy. +// Acceptable values are: 'Orphan' - orphan the dependents; 'Background' - +// allow the garbage collector to delete the dependents in the background; +// 'Foreground' - a cascading policy that deletes all dependents in the +// foreground. +type PropagationPolicy metav1.DeletionPropagation + +// ApplyToDelete applies the given delete options on these options. +// It will propagate to the dependents of the object to let the garbage collector handle it. +func (p PropagationPolicy) ApplyToDelete(opts *DeleteOptions) { + policy := metav1.DeletionPropagation(p) + opts.PropagationPolicy = &policy +} + +// ApplyToDeleteAllOf applies this configuration to the given an List options. +func (p PropagationPolicy) ApplyToDeleteAllOf(opts *DeleteAllOfOptions) { + p.ApplyToDelete(&opts.DeleteOptions) +} + +// }}} + +// {{{ List Options + +// ListOptions contains options for limiting or filtering results. +// It's generally a subset of metav1.ListOptions, with support for +// pre-parsed selectors (since generally, selectors will be executed +// against the cache). +type ListOptions struct { + // LabelSelector filters results by label. Use SetLabelSelector to + // set from raw string form. + LabelSelector labels.Selector + // FieldSelector filters results by a particular field. In order + // to use this with cache-based implementations, restrict usage to + // a single field-value pair that's been added to the indexers. + FieldSelector fields.Selector + + // Namespace represents the namespace to list for, or empty for + // non-namespaced objects, or to list across all namespaces. + Namespace string + + // Limit specifies the maximum number of results to return from the server. The server may + // not support this field on all resource types, but if it does and more results remain it + // will set the continue field on the returned list object. This field is not supported if watch + // is true in the Raw ListOptions. + Limit int64 + // Continue is a token returned by the server that lets a client retrieve chunks of results + // from the server by specifying limit. The server may reject requests for continuation tokens + // it does not recognize and will return a 410 error if the token can no longer be used because + // it has expired. This field is not supported if watch is true in the Raw ListOptions. + Continue string + + // Raw represents raw ListOptions, as passed to the API server. Note + // that these may not be respected by all implementations of interface, + // and the LabelSelector, FieldSelector, Limit and Continue fields are ignored. + Raw *metav1.ListOptions +} + +var _ ListOption = &ListOptions{} + +// ApplyToList implements ListOption for ListOptions. 
+func (o *ListOptions) ApplyToList(lo *ListOptions) { + if o.LabelSelector != nil { + lo.LabelSelector = o.LabelSelector + } + if o.FieldSelector != nil { + lo.FieldSelector = o.FieldSelector + } + if o.Namespace != "" { + lo.Namespace = o.Namespace + } + if o.Raw != nil { + lo.Raw = o.Raw + } + if o.Limit > 0 { + lo.Limit = o.Limit + } + if o.Continue != "" { + lo.Continue = o.Continue + } +} + +// AsListOptions returns these options as a flattened metav1.ListOptions. +// This may mutate the Raw field. +func (o *ListOptions) AsListOptions() *metav1.ListOptions { + if o == nil { + return &metav1.ListOptions{} + } + if o.Raw == nil { + o.Raw = &metav1.ListOptions{} + } + if o.LabelSelector != nil { + o.Raw.LabelSelector = o.LabelSelector.String() + } + if o.FieldSelector != nil { + o.Raw.FieldSelector = o.FieldSelector.String() + } + if !o.Raw.Watch { + o.Raw.Limit = o.Limit + o.Raw.Continue = o.Continue + } + return o.Raw +} + +// ApplyOptions applies the given list options on these options, +// and then returns itself (for convenient chaining). +func (o *ListOptions) ApplyOptions(opts []ListOption) *ListOptions { + for _, opt := range opts { + opt.ApplyToList(o) + } + return o +} + +// MatchingLabels filters the list/delete operation on the given set of labels. +type MatchingLabels map[string]string + +// ApplyToList applies this configuration to the given list options. +func (m MatchingLabels) ApplyToList(opts *ListOptions) { + // TODO(directxman12): can we avoid reserializing this over and over? + sel := labels.SelectorFromValidatedSet(map[string]string(m)) + opts.LabelSelector = sel +} + +// ApplyToDeleteAllOf applies this configuration to the given an List options. +func (m MatchingLabels) ApplyToDeleteAllOf(opts *DeleteAllOfOptions) { + m.ApplyToList(&opts.ListOptions) +} + +// HasLabels filters the list/delete operation checking if the set of labels exists +// without checking their values. +type HasLabels []string + +// ApplyToList applies this configuration to the given list options. +func (m HasLabels) ApplyToList(opts *ListOptions) { + sel := labels.NewSelector() + for _, label := range m { + r, err := labels.NewRequirement(label, selection.Exists, nil) + if err == nil { + sel = sel.Add(*r) + } + } + opts.LabelSelector = sel +} + +// ApplyToDeleteAllOf applies this configuration to the given an List options. +func (m HasLabels) ApplyToDeleteAllOf(opts *DeleteAllOfOptions) { + m.ApplyToList(&opts.ListOptions) +} + +// MatchingLabelsSelector filters the list/delete operation on the given label +// selector (or index in the case of cached lists). A struct is used because +// labels.Selector is an interface, which cannot be aliased. +type MatchingLabelsSelector struct { + labels.Selector +} + +// ApplyToList applies this configuration to the given list options. +func (m MatchingLabelsSelector) ApplyToList(opts *ListOptions) { + opts.LabelSelector = m +} + +// ApplyToDeleteAllOf applies this configuration to the given an List options. +func (m MatchingLabelsSelector) ApplyToDeleteAllOf(opts *DeleteAllOfOptions) { + m.ApplyToList(&opts.ListOptions) +} + +// MatchingFields filters the list/delete operation on the given field Set +// (or index in the case of cached lists). +type MatchingFields fields.Set + +// ApplyToList applies this configuration to the given list options. +func (m MatchingFields) ApplyToList(opts *ListOptions) { + // TODO(directxman12): can we avoid re-serializing this? 
+ sel := fields.Set(m).AsSelector() + opts.FieldSelector = sel +} + +// ApplyToDeleteAllOf applies this configuration to the given an List options. +func (m MatchingFields) ApplyToDeleteAllOf(opts *DeleteAllOfOptions) { + m.ApplyToList(&opts.ListOptions) +} + +// MatchingFieldsSelector filters the list/delete operation on the given field +// selector (or index in the case of cached lists). A struct is used because +// fields.Selector is an interface, which cannot be aliased. +type MatchingFieldsSelector struct { + fields.Selector +} + +// ApplyToList applies this configuration to the given list options. +func (m MatchingFieldsSelector) ApplyToList(opts *ListOptions) { + opts.FieldSelector = m +} + +// ApplyToDeleteAllOf applies this configuration to the given an List options. +func (m MatchingFieldsSelector) ApplyToDeleteAllOf(opts *DeleteAllOfOptions) { + m.ApplyToList(&opts.ListOptions) +} + +// InNamespace restricts the list/delete operation to the given namespace. +type InNamespace string + +// ApplyToList applies this configuration to the given list options. +func (n InNamespace) ApplyToList(opts *ListOptions) { + opts.Namespace = string(n) +} + +// ApplyToDeleteAllOf applies this configuration to the given an List options. +func (n InNamespace) ApplyToDeleteAllOf(opts *DeleteAllOfOptions) { + n.ApplyToList(&opts.ListOptions) +} + +// Limit specifies the maximum number of results to return from the server. +// Limit does not implement DeleteAllOfOption interface because the server +// does not support setting it for deletecollection operations. +type Limit int64 + +// ApplyToList applies this configuration to the given an list options. +func (l Limit) ApplyToList(opts *ListOptions) { + opts.Limit = int64(l) +} + +// Continue sets a continuation token to retrieve chunks of results when using limit. +// Continue does not implement DeleteAllOfOption interface because the server +// does not support setting it for deletecollection operations. +type Continue string + +// ApplyToList applies this configuration to the given an List options. +func (c Continue) ApplyToList(opts *ListOptions) { + opts.Continue = string(c) +} + +// }}} + +// {{{ Update Options + +// UpdateOptions contains options for create requests. It's generally a subset +// of metav1.UpdateOptions. +type UpdateOptions struct { + // When present, indicates that modifications should not be + // persisted. An invalid or unrecognized dryRun directive will + // result in an error response and no further processing of the + // request. Valid values are: + // - All: all dry run stages will be processed + DryRun []string + + // FieldManager is the name of the user or component submitting + // this request. It must be set with server-side apply. + FieldManager string + + // Raw represents raw UpdateOptions, as passed to the API server. + Raw *metav1.UpdateOptions +} + +// AsUpdateOptions returns these options as a metav1.UpdateOptions. +// This may mutate the Raw field. +func (o *UpdateOptions) AsUpdateOptions() *metav1.UpdateOptions { + if o == nil { + return &metav1.UpdateOptions{} + } + if o.Raw == nil { + o.Raw = &metav1.UpdateOptions{} + } + + o.Raw.DryRun = o.DryRun + o.Raw.FieldManager = o.FieldManager + return o.Raw +} + +// ApplyOptions applies the given update options on these options, +// and then returns itself (for convenient chaining). 
+func (o *UpdateOptions) ApplyOptions(opts []UpdateOption) *UpdateOptions { + for _, opt := range opts { + opt.ApplyToUpdate(o) + } + return o +} + +var _ UpdateOption = &UpdateOptions{} + +// ApplyToUpdate implements UpdateOption. +func (o *UpdateOptions) ApplyToUpdate(uo *UpdateOptions) { + if o.DryRun != nil { + uo.DryRun = o.DryRun + } + if o.FieldManager != "" { + uo.FieldManager = o.FieldManager + } + if o.Raw != nil { + uo.Raw = o.Raw + } +} + +// }}} + +// {{{ Patch Options + +// PatchOptions contains options for patch requests. +type PatchOptions struct { + // When present, indicates that modifications should not be + // persisted. An invalid or unrecognized dryRun directive will + // result in an error response and no further processing of the + // request. Valid values are: + // - All: all dry run stages will be processed + DryRun []string + + // Force is going to "force" Apply requests. It means user will + // re-acquire conflicting fields owned by other people. Force + // flag must be unset for non-apply patch requests. + // +optional + Force *bool + + // FieldManager is the name of the user or component submitting + // this request. It must be set with server-side apply. + FieldManager string + + // Raw represents raw PatchOptions, as passed to the API server. + Raw *metav1.PatchOptions +} + +// ApplyOptions applies the given patch options on these options, +// and then returns itself (for convenient chaining). +func (o *PatchOptions) ApplyOptions(opts []PatchOption) *PatchOptions { + for _, opt := range opts { + opt.ApplyToPatch(o) + } + return o +} + +// AsPatchOptions returns these options as a metav1.PatchOptions. +// This may mutate the Raw field. +func (o *PatchOptions) AsPatchOptions() *metav1.PatchOptions { + if o == nil { + return &metav1.PatchOptions{} + } + if o.Raw == nil { + o.Raw = &metav1.PatchOptions{} + } + + o.Raw.DryRun = o.DryRun + o.Raw.Force = o.Force + o.Raw.FieldManager = o.FieldManager + return o.Raw +} + +var _ PatchOption = &PatchOptions{} + +// ApplyToPatch implements PatchOptions. +func (o *PatchOptions) ApplyToPatch(po *PatchOptions) { + if o.DryRun != nil { + po.DryRun = o.DryRun + } + if o.Force != nil { + po.Force = o.Force + } + if o.FieldManager != "" { + po.FieldManager = o.FieldManager + } + if o.Raw != nil { + po.Raw = o.Raw + } +} + +// ForceOwnership indicates that in case of conflicts with server-side apply, +// the client should acquire ownership of the conflicting field. Most +// controllers should use this. +var ForceOwnership = forceOwnership{} + +type forceOwnership struct{} + +func (forceOwnership) ApplyToPatch(opts *PatchOptions) { + definitelyTrue := true + opts.Force = &definitelyTrue +} + +// }}} + +// {{{ DeleteAllOf Options + +// these are all just delete options and list options + +// DeleteAllOfOptions contains options for deletecollection (deleteallof) requests. +// It's just list and delete options smooshed together. +type DeleteAllOfOptions struct { + ListOptions + DeleteOptions +} + +// ApplyOptions applies the given deleteallof options on these options, +// and then returns itself (for convenient chaining). +func (o *DeleteAllOfOptions) ApplyOptions(opts []DeleteAllOfOption) *DeleteAllOfOptions { + for _, opt := range opts { + opt.ApplyToDeleteAllOf(o) + } + return o +} + +var _ DeleteAllOfOption = &DeleteAllOfOptions{} + +// ApplyToDeleteAllOf implements DeleteAllOfOption. 
+func (o *DeleteAllOfOptions) ApplyToDeleteAllOf(do *DeleteAllOfOptions) { + o.ApplyToList(&do.ListOptions) + o.ApplyToDelete(&do.DeleteOptions) +} + +// }}} diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/client/patch.go b/vendor/sigs.k8s.io/controller-runtime/pkg/client/patch.go new file mode 100644 index 000000000..10984c534 --- /dev/null +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/client/patch.go @@ -0,0 +1,213 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package client + +import ( + "fmt" + + jsonpatch "github.com/evanphx/json-patch" + "k8s.io/apimachinery/pkg/types" + "k8s.io/apimachinery/pkg/util/json" + "k8s.io/apimachinery/pkg/util/strategicpatch" +) + +var ( + // Apply uses server-side apply to patch the given object. + Apply Patch = applyPatch{} + + // Merge uses the raw object as a merge patch, without modifications. + // Use MergeFrom if you wish to compute a diff instead. + Merge Patch = mergePatch{} +) + +type patch struct { + patchType types.PatchType + data []byte +} + +// Type implements Patch. +func (s *patch) Type() types.PatchType { + return s.patchType +} + +// Data implements Patch. +func (s *patch) Data(obj Object) ([]byte, error) { + return s.data, nil +} + +// RawPatch constructs a new Patch with the given PatchType and data. +func RawPatch(patchType types.PatchType, data []byte) Patch { + return &patch{patchType, data} +} + +// MergeFromWithOptimisticLock can be used if clients want to make sure a patch +// is being applied to the latest resource version of an object. +// +// The behavior is similar to what an Update would do, without the need to send the +// whole object. Usually this method is useful if you might have multiple clients +// acting on the same object and the same API version, but with different versions of the Go structs. +// +// For example, an "older" copy of a Widget that has fields A and B, and a "newer" copy with A, B, and C. +// Sending an update using the older struct definition results in C being dropped, whereas using a patch does not. +type MergeFromWithOptimisticLock struct{} + +// ApplyToMergeFrom applies this configuration to the given patch options. +func (m MergeFromWithOptimisticLock) ApplyToMergeFrom(in *MergeFromOptions) { + in.OptimisticLock = true +} + +// MergeFromOption is some configuration that modifies options for a merge-from patch data. +type MergeFromOption interface { + // ApplyToMergeFrom applies this configuration to the given patch options. + ApplyToMergeFrom(*MergeFromOptions) +} + +// MergeFromOptions contains options to generate a merge-from patch data. +type MergeFromOptions struct { + // OptimisticLock, when true, includes `metadata.resourceVersion` into the final + // patch data. If the `resourceVersion` field doesn't match what's stored, + // the operation results in a conflict and clients will need to try again. 
+ OptimisticLock bool +} + +type mergeFromPatch struct { + patchType types.PatchType + createPatch func(originalJSON, modifiedJSON []byte, dataStruct interface{}) ([]byte, error) + from Object + opts MergeFromOptions +} + +// Type implements Patch. +func (s *mergeFromPatch) Type() types.PatchType { + return s.patchType +} + +// Data implements Patch. +func (s *mergeFromPatch) Data(obj Object) ([]byte, error) { + original := s.from + modified := obj + + if s.opts.OptimisticLock { + version := original.GetResourceVersion() + if len(version) == 0 { + return nil, fmt.Errorf("cannot use OptimisticLock, object %q does not have any resource version we can use", original) + } + + original = original.DeepCopyObject().(Object) + original.SetResourceVersion("") + + modified = modified.DeepCopyObject().(Object) + modified.SetResourceVersion(version) + } + + originalJSON, err := json.Marshal(original) + if err != nil { + return nil, err + } + + modifiedJSON, err := json.Marshal(modified) + if err != nil { + return nil, err + } + + data, err := s.createPatch(originalJSON, modifiedJSON, obj) + if err != nil { + return nil, err + } + + return data, nil +} + +func createMergePatch(originalJSON, modifiedJSON []byte, _ interface{}) ([]byte, error) { + return jsonpatch.CreateMergePatch(originalJSON, modifiedJSON) +} + +func createStrategicMergePatch(originalJSON, modifiedJSON []byte, dataStruct interface{}) ([]byte, error) { + return strategicpatch.CreateTwoWayMergePatch(originalJSON, modifiedJSON, dataStruct) +} + +// MergeFrom creates a Patch that patches using the merge-patch strategy with the given object as base. +// The difference between MergeFrom and StrategicMergeFrom lays in the handling of modified list fields. +// When using MergeFrom, existing lists will be completely replaced by new lists. +// When using StrategicMergeFrom, the list field's `patchStrategy` is respected if specified in the API type, +// e.g. the existing list is not replaced completely but rather merged with the new one using the list's `patchMergeKey`. +// See https://kubernetes.io/docs/tasks/manage-kubernetes-objects/update-api-object-kubectl-patch/ for more details on +// the difference between merge-patch and strategic-merge-patch. +func MergeFrom(obj Object) Patch { + return &mergeFromPatch{patchType: types.MergePatchType, createPatch: createMergePatch, from: obj} +} + +// MergeFromWithOptions creates a Patch that patches using the merge-patch strategy with the given object as base. +// See MergeFrom for more details. +func MergeFromWithOptions(obj Object, opts ...MergeFromOption) Patch { + options := &MergeFromOptions{} + for _, opt := range opts { + opt.ApplyToMergeFrom(options) + } + return &mergeFromPatch{patchType: types.MergePatchType, createPatch: createMergePatch, from: obj, opts: *options} +} + +// StrategicMergeFrom creates a Patch that patches using the strategic-merge-patch strategy with the given object as base. +// The difference between MergeFrom and StrategicMergeFrom lays in the handling of modified list fields. +// When using MergeFrom, existing lists will be completely replaced by new lists. +// When using StrategicMergeFrom, the list field's `patchStrategy` is respected if specified in the API type, +// e.g. the existing list is not replaced completely but rather merged with the new one using the list's `patchMergeKey`. +// See https://kubernetes.io/docs/tasks/manage-kubernetes-objects/update-api-object-kubectl-patch/ for more details on +// the difference between merge-patch and strategic-merge-patch. 
+// Please note, that CRDs don't support strategic-merge-patch, see +// https://kubernetes.io/docs/concepts/extend-kubernetes/api-extension/custom-resources/#advanced-features-and-flexibility +func StrategicMergeFrom(obj Object, opts ...MergeFromOption) Patch { + options := &MergeFromOptions{} + for _, opt := range opts { + opt.ApplyToMergeFrom(options) + } + return &mergeFromPatch{patchType: types.StrategicMergePatchType, createPatch: createStrategicMergePatch, from: obj, opts: *options} +} + +// mergePatch uses a raw merge strategy to patch the object. +type mergePatch struct{} + +// Type implements Patch. +func (p mergePatch) Type() types.PatchType { + return types.MergePatchType +} + +// Data implements Patch. +func (p mergePatch) Data(obj Object) ([]byte, error) { + // NB(directxman12): we might technically want to be using an actual encoder + // here (in case some more performant encoder is introduced) but this is + // correct and sufficient for our uses (it's what the JSON serializer in + // client-go does, more-or-less). + return json.Marshal(obj) +} + +// applyPatch uses server-side apply to patch the object. +type applyPatch struct{} + +// Type implements Patch. +func (p applyPatch) Type() types.PatchType { + return types.ApplyPatchType +} + +// Data implements Patch. +func (p applyPatch) Data(obj Object) ([]byte, error) { + // NB(directxman12): we might technically want to be using an actual encoder + // here (in case some more performant encoder is introduced) but this is + // correct and sufficient for our uses (it's what the JSON serializer in + // client-go does, more-or-less). + return json.Marshal(obj) +} diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/client/split.go b/vendor/sigs.k8s.io/controller-runtime/pkg/client/split.go new file mode 100644 index 000000000..bf4b861f3 --- /dev/null +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/client/split.go @@ -0,0 +1,141 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package client + +import ( + "context" + "strings" + + "k8s.io/apimachinery/pkg/api/meta" + "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" + + "sigs.k8s.io/controller-runtime/pkg/client/apiutil" +) + +// NewDelegatingClientInput encapsulates the input parameters to create a new delegating client. +type NewDelegatingClientInput struct { + CacheReader Reader + Client Client + UncachedObjects []Object + CacheUnstructured bool +} + +// NewDelegatingClient creates a new delegating client. +// +// A delegating client forms a Client by composing separate reader, writer and +// statusclient interfaces. This way, you can have an Client that reads from a +// cache and writes to the API server. 
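For orientation, here is a small caller-side sketch (not taken from this patch) showing how the list options and the merge-from patch helpers above are typically combined. The `relabelPods` helper and the label key are illustrative assumptions; only the `client` package calls are from the code above.

```go
package example

import (
	"context"

	corev1 "k8s.io/api/core/v1"
	"sigs.k8s.io/controller-runtime/pkg/client"
)

// relabelPods lists up to ten Pods in the given namespace and patches a label
// onto each of them. The patch is computed as a merge-from diff against a deep
// copy of the object, with optimistic locking enabled.
func relabelPods(ctx context.Context, c client.Client, namespace string) error {
	var pods corev1.PodList
	if err := c.List(ctx, &pods, client.InNamespace(namespace), client.Limit(10)); err != nil {
		return err
	}

	for i := range pods.Items {
		pod := &pods.Items[i]
		base := pod.DeepCopy() // the patch is the diff of pod against this base

		if pod.Labels == nil {
			pod.Labels = map[string]string{}
		}
		pod.Labels["example/patched"] = "true"

		patch := client.MergeFromWithOptions(base, client.MergeFromWithOptimisticLock{})
		if err := c.Patch(ctx, pod, patch); err != nil {
			return err
		}
	}
	return nil
}
```

Because `MergeFromWithOptimisticLock{}` is passed, the generated patch carries `metadata.resourceVersion`, so a concurrent update of the same Pod surfaces as a conflict error instead of being silently overwritten.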
+func NewDelegatingClient(in NewDelegatingClientInput) (Client, error) { + uncachedGVKs := map[schema.GroupVersionKind]struct{}{} + for _, obj := range in.UncachedObjects { + gvk, err := apiutil.GVKForObject(obj, in.Client.Scheme()) + if err != nil { + return nil, err + } + uncachedGVKs[gvk] = struct{}{} + } + + return &delegatingClient{ + scheme: in.Client.Scheme(), + mapper: in.Client.RESTMapper(), + Reader: &delegatingReader{ + CacheReader: in.CacheReader, + ClientReader: in.Client, + scheme: in.Client.Scheme(), + uncachedGVKs: uncachedGVKs, + cacheUnstructured: in.CacheUnstructured, + }, + Writer: in.Client, + StatusClient: in.Client, + }, nil +} + +type delegatingClient struct { + Reader + Writer + StatusClient + + scheme *runtime.Scheme + mapper meta.RESTMapper +} + +// Scheme returns the scheme this client is using. +func (d *delegatingClient) Scheme() *runtime.Scheme { + return d.scheme +} + +// RESTMapper returns the rest mapper this client is using. +func (d *delegatingClient) RESTMapper() meta.RESTMapper { + return d.mapper +} + +// delegatingReader forms a Reader that will cause Get and List requests for +// unstructured types to use the ClientReader while requests for any other type +// of object with use the CacheReader. This avoids accidentally caching the +// entire cluster in the common case of loading arbitrary unstructured objects +// (e.g. from OwnerReferences). +type delegatingReader struct { + CacheReader Reader + ClientReader Reader + + uncachedGVKs map[schema.GroupVersionKind]struct{} + scheme *runtime.Scheme + cacheUnstructured bool +} + +func (d *delegatingReader) shouldBypassCache(obj runtime.Object) (bool, error) { + gvk, err := apiutil.GVKForObject(obj, d.scheme) + if err != nil { + return false, err + } + // TODO: this is producing unsafe guesses that don't actually work, + // but it matches ~99% of the cases out there. + if meta.IsListType(obj) { + gvk.Kind = strings.TrimSuffix(gvk.Kind, "List") + } + if _, isUncached := d.uncachedGVKs[gvk]; isUncached { + return true, nil + } + if !d.cacheUnstructured { + _, isUnstructured := obj.(*unstructured.Unstructured) + _, isUnstructuredList := obj.(*unstructured.UnstructuredList) + return isUnstructured || isUnstructuredList, nil + } + return false, nil +} + +// Get retrieves an obj for a given object key from the Kubernetes Cluster. +func (d *delegatingReader) Get(ctx context.Context, key ObjectKey, obj Object) error { + if isUncached, err := d.shouldBypassCache(obj); err != nil { + return err + } else if isUncached { + return d.ClientReader.Get(ctx, key, obj) + } + return d.CacheReader.Get(ctx, key, obj) +} + +// List retrieves list of objects for a given namespace and list options. +func (d *delegatingReader) List(ctx context.Context, list ObjectList, opts ...ListOption) error { + if isUncached, err := d.shouldBypassCache(list); err != nil { + return err + } else if isUncached { + return d.ClientReader.List(ctx, list, opts...) + } + return d.CacheReader.List(ctx, list, opts...) +} diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/client/typed_client.go b/vendor/sigs.k8s.io/controller-runtime/pkg/client/typed_client.go new file mode 100644 index 000000000..dde7b21f2 --- /dev/null +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/client/typed_client.go @@ -0,0 +1,205 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
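As a hedged illustration of the delegating client just added: a composed client that serves most reads from a cache but always fetches `Secrets` directly from the API server. The wrapper function name is made up; the input fields match `NewDelegatingClientInput` above.

```go
package example

import (
	corev1 "k8s.io/api/core/v1"
	"sigs.k8s.io/controller-runtime/pkg/client"
)

// newClientBypassingCacheForSecrets composes a cache-backed reader with a
// direct client; Secret reads bypass the cache and go straight to the API
// server, everything else is served from the cache.
func newClientBypassingCacheForSecrets(cacheReader client.Reader, direct client.Client) (client.Client, error) {
	return client.NewDelegatingClient(client.NewDelegatingClientInput{
		CacheReader:     cacheReader,
		Client:          direct,
		UncachedObjects: []client.Object{&corev1.Secret{}},
	})
}
```

Managers typically perform this wiring themselves; the sketch only exercises the raw constructor.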
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package client + +import ( + "context" + + "k8s.io/apimachinery/pkg/runtime" +) + +var _ Reader = &typedClient{} +var _ Writer = &typedClient{} +var _ StatusWriter = &typedClient{} + +// client is a client.Client that reads and writes directly from/to an API server. It lazily initializes +// new clients at the time they are used, and caches the client. +type typedClient struct { + cache *clientCache + paramCodec runtime.ParameterCodec +} + +// Create implements client.Client. +func (c *typedClient) Create(ctx context.Context, obj Object, opts ...CreateOption) error { + o, err := c.cache.getObjMeta(obj) + if err != nil { + return err + } + + createOpts := &CreateOptions{} + createOpts.ApplyOptions(opts) + return o.Post(). + NamespaceIfScoped(o.GetNamespace(), o.isNamespaced()). + Resource(o.resource()). + Body(obj). + VersionedParams(createOpts.AsCreateOptions(), c.paramCodec). + Do(ctx). + Into(obj) +} + +// Update implements client.Client. +func (c *typedClient) Update(ctx context.Context, obj Object, opts ...UpdateOption) error { + o, err := c.cache.getObjMeta(obj) + if err != nil { + return err + } + + updateOpts := &UpdateOptions{} + updateOpts.ApplyOptions(opts) + return o.Put(). + NamespaceIfScoped(o.GetNamespace(), o.isNamespaced()). + Resource(o.resource()). + Name(o.GetName()). + Body(obj). + VersionedParams(updateOpts.AsUpdateOptions(), c.paramCodec). + Do(ctx). + Into(obj) +} + +// Delete implements client.Client. +func (c *typedClient) Delete(ctx context.Context, obj Object, opts ...DeleteOption) error { + o, err := c.cache.getObjMeta(obj) + if err != nil { + return err + } + + deleteOpts := DeleteOptions{} + deleteOpts.ApplyOptions(opts) + + return o.Delete(). + NamespaceIfScoped(o.GetNamespace(), o.isNamespaced()). + Resource(o.resource()). + Name(o.GetName()). + Body(deleteOpts.AsDeleteOptions()). + Do(ctx). + Error() +} + +// DeleteAllOf implements client.Client. +func (c *typedClient) DeleteAllOf(ctx context.Context, obj Object, opts ...DeleteAllOfOption) error { + o, err := c.cache.getObjMeta(obj) + if err != nil { + return err + } + + deleteAllOfOpts := DeleteAllOfOptions{} + deleteAllOfOpts.ApplyOptions(opts) + + return o.Delete(). + NamespaceIfScoped(deleteAllOfOpts.ListOptions.Namespace, o.isNamespaced()). + Resource(o.resource()). + VersionedParams(deleteAllOfOpts.AsListOptions(), c.paramCodec). + Body(deleteAllOfOpts.AsDeleteOptions()). + Do(ctx). + Error() +} + +// Patch implements client.Client. +func (c *typedClient) Patch(ctx context.Context, obj Object, patch Patch, opts ...PatchOption) error { + o, err := c.cache.getObjMeta(obj) + if err != nil { + return err + } + + data, err := patch.Data(obj) + if err != nil { + return err + } + + patchOpts := &PatchOptions{} + return o.Patch(patch.Type()). + NamespaceIfScoped(o.GetNamespace(), o.isNamespaced()). + Resource(o.resource()). + Name(o.GetName()). + VersionedParams(patchOpts.ApplyOptions(opts).AsPatchOptions(), c.paramCodec). + Body(data). + Do(ctx). + Into(obj) +} + +// Get implements client.Client. 
+func (c *typedClient) Get(ctx context.Context, key ObjectKey, obj Object) error { + r, err := c.cache.getResource(obj) + if err != nil { + return err + } + return r.Get(). + NamespaceIfScoped(key.Namespace, r.isNamespaced()). + Resource(r.resource()). + Name(key.Name).Do(ctx).Into(obj) +} + +// List implements client.Client. +func (c *typedClient) List(ctx context.Context, obj ObjectList, opts ...ListOption) error { + r, err := c.cache.getResource(obj) + if err != nil { + return err + } + listOpts := ListOptions{} + listOpts.ApplyOptions(opts) + return r.Get(). + NamespaceIfScoped(listOpts.Namespace, r.isNamespaced()). + Resource(r.resource()). + VersionedParams(listOpts.AsListOptions(), c.paramCodec). + Do(ctx). + Into(obj) +} + +// UpdateStatus used by StatusWriter to write status. +func (c *typedClient) UpdateStatus(ctx context.Context, obj Object, opts ...UpdateOption) error { + o, err := c.cache.getObjMeta(obj) + if err != nil { + return err + } + // TODO(droot): examine the returned error and check if it error needs to be + // wrapped to improve the UX ? + // It will be nice to receive an error saying the object doesn't implement + // status subresource and check CRD definition + return o.Put(). + NamespaceIfScoped(o.GetNamespace(), o.isNamespaced()). + Resource(o.resource()). + Name(o.GetName()). + SubResource("status"). + Body(obj). + VersionedParams((&UpdateOptions{}).ApplyOptions(opts).AsUpdateOptions(), c.paramCodec). + Do(ctx). + Into(obj) +} + +// PatchStatus used by StatusWriter to write status. +func (c *typedClient) PatchStatus(ctx context.Context, obj Object, patch Patch, opts ...PatchOption) error { + o, err := c.cache.getObjMeta(obj) + if err != nil { + return err + } + + data, err := patch.Data(obj) + if err != nil { + return err + } + + patchOpts := &PatchOptions{} + return o.Patch(patch.Type()). + NamespaceIfScoped(o.GetNamespace(), o.isNamespaced()). + Resource(o.resource()). + Name(o.GetName()). + SubResource("status"). + Body(data). + VersionedParams(patchOpts.ApplyOptions(opts).AsPatchOptions(), c.paramCodec). + Do(ctx). + Into(obj) +} diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/client/unstructured_client.go b/vendor/sigs.k8s.io/controller-runtime/pkg/client/unstructured_client.go new file mode 100644 index 000000000..dcf15be27 --- /dev/null +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/client/unstructured_client.go @@ -0,0 +1,277 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package client + +import ( + "context" + "fmt" + "strings" + + "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" + "k8s.io/apimachinery/pkg/runtime" +) + +var _ Reader = &unstructuredClient{} +var _ Writer = &unstructuredClient{} +var _ StatusWriter = &unstructuredClient{} + +// client is a client.Client that reads and writes directly from/to an API server. It lazily initializes +// new clients at the time they are used, and caches the client. 
+type unstructuredClient struct { + cache *clientCache + paramCodec runtime.ParameterCodec +} + +// Create implements client.Client. +func (uc *unstructuredClient) Create(ctx context.Context, obj Object, opts ...CreateOption) error { + u, ok := obj.(*unstructured.Unstructured) + if !ok { + return fmt.Errorf("unstructured client did not understand object: %T", obj) + } + + gvk := u.GroupVersionKind() + + o, err := uc.cache.getObjMeta(obj) + if err != nil { + return err + } + + createOpts := &CreateOptions{} + createOpts.ApplyOptions(opts) + result := o.Post(). + NamespaceIfScoped(o.GetNamespace(), o.isNamespaced()). + Resource(o.resource()). + Body(obj). + VersionedParams(createOpts.AsCreateOptions(), uc.paramCodec). + Do(ctx). + Into(obj) + + u.SetGroupVersionKind(gvk) + return result +} + +// Update implements client.Client. +func (uc *unstructuredClient) Update(ctx context.Context, obj Object, opts ...UpdateOption) error { + u, ok := obj.(*unstructured.Unstructured) + if !ok { + return fmt.Errorf("unstructured client did not understand object: %T", obj) + } + + gvk := u.GroupVersionKind() + + o, err := uc.cache.getObjMeta(obj) + if err != nil { + return err + } + + updateOpts := UpdateOptions{} + updateOpts.ApplyOptions(opts) + result := o.Put(). + NamespaceIfScoped(o.GetNamespace(), o.isNamespaced()). + Resource(o.resource()). + Name(o.GetName()). + Body(obj). + VersionedParams(updateOpts.AsUpdateOptions(), uc.paramCodec). + Do(ctx). + Into(obj) + + u.SetGroupVersionKind(gvk) + return result +} + +// Delete implements client.Client. +func (uc *unstructuredClient) Delete(ctx context.Context, obj Object, opts ...DeleteOption) error { + _, ok := obj.(*unstructured.Unstructured) + if !ok { + return fmt.Errorf("unstructured client did not understand object: %T", obj) + } + + o, err := uc.cache.getObjMeta(obj) + if err != nil { + return err + } + + deleteOpts := DeleteOptions{} + deleteOpts.ApplyOptions(opts) + return o.Delete(). + NamespaceIfScoped(o.GetNamespace(), o.isNamespaced()). + Resource(o.resource()). + Name(o.GetName()). + Body(deleteOpts.AsDeleteOptions()). + Do(ctx). + Error() +} + +// DeleteAllOf implements client.Client. +func (uc *unstructuredClient) DeleteAllOf(ctx context.Context, obj Object, opts ...DeleteAllOfOption) error { + _, ok := obj.(*unstructured.Unstructured) + if !ok { + return fmt.Errorf("unstructured client did not understand object: %T", obj) + } + + o, err := uc.cache.getObjMeta(obj) + if err != nil { + return err + } + + deleteAllOfOpts := DeleteAllOfOptions{} + deleteAllOfOpts.ApplyOptions(opts) + return o.Delete(). + NamespaceIfScoped(deleteAllOfOpts.ListOptions.Namespace, o.isNamespaced()). + Resource(o.resource()). + VersionedParams(deleteAllOfOpts.AsListOptions(), uc.paramCodec). + Body(deleteAllOfOpts.AsDeleteOptions()). + Do(ctx). + Error() +} + +// Patch implements client.Client. +func (uc *unstructuredClient) Patch(ctx context.Context, obj Object, patch Patch, opts ...PatchOption) error { + _, ok := obj.(*unstructured.Unstructured) + if !ok { + return fmt.Errorf("unstructured client did not understand object: %T", obj) + } + + o, err := uc.cache.getObjMeta(obj) + if err != nil { + return err + } + + data, err := patch.Data(obj) + if err != nil { + return err + } + + patchOpts := &PatchOptions{} + return o.Patch(patch.Type()). + NamespaceIfScoped(o.GetNamespace(), o.isNamespaced()). + Resource(o.resource()). + Name(o.GetName()). + VersionedParams(patchOpts.ApplyOptions(opts).AsPatchOptions(), uc.paramCodec). + Body(data). + Do(ctx). 
+ Into(obj) +} + +// Get implements client.Client. +func (uc *unstructuredClient) Get(ctx context.Context, key ObjectKey, obj Object) error { + u, ok := obj.(*unstructured.Unstructured) + if !ok { + return fmt.Errorf("unstructured client did not understand object: %T", obj) + } + + gvk := u.GroupVersionKind() + + r, err := uc.cache.getResource(obj) + if err != nil { + return err + } + + result := r.Get(). + NamespaceIfScoped(key.Namespace, r.isNamespaced()). + Resource(r.resource()). + Name(key.Name). + Do(ctx). + Into(obj) + + u.SetGroupVersionKind(gvk) + + return result +} + +// List implements client.Client. +func (uc *unstructuredClient) List(ctx context.Context, obj ObjectList, opts ...ListOption) error { + u, ok := obj.(*unstructured.UnstructuredList) + if !ok { + return fmt.Errorf("unstructured client did not understand object: %T", obj) + } + + gvk := u.GroupVersionKind() + if strings.HasSuffix(gvk.Kind, "List") { + gvk.Kind = gvk.Kind[:len(gvk.Kind)-4] + } + + listOpts := ListOptions{} + listOpts.ApplyOptions(opts) + + r, err := uc.cache.getResource(obj) + if err != nil { + return err + } + + return r.Get(). + NamespaceIfScoped(listOpts.Namespace, r.isNamespaced()). + Resource(r.resource()). + VersionedParams(listOpts.AsListOptions(), uc.paramCodec). + Do(ctx). + Into(obj) +} + +func (uc *unstructuredClient) UpdateStatus(ctx context.Context, obj Object, opts ...UpdateOption) error { + _, ok := obj.(*unstructured.Unstructured) + if !ok { + return fmt.Errorf("unstructured client did not understand object: %T", obj) + } + + o, err := uc.cache.getObjMeta(obj) + if err != nil { + return err + } + + return o.Put(). + NamespaceIfScoped(o.GetNamespace(), o.isNamespaced()). + Resource(o.resource()). + Name(o.GetName()). + SubResource("status"). + Body(obj). + VersionedParams((&UpdateOptions{}).ApplyOptions(opts).AsUpdateOptions(), uc.paramCodec). + Do(ctx). + Into(obj) +} + +func (uc *unstructuredClient) PatchStatus(ctx context.Context, obj Object, patch Patch, opts ...PatchOption) error { + u, ok := obj.(*unstructured.Unstructured) + if !ok { + return fmt.Errorf("unstructured client did not understand object: %T", obj) + } + + gvk := u.GroupVersionKind() + + o, err := uc.cache.getObjMeta(obj) + if err != nil { + return err + } + + data, err := patch.Data(obj) + if err != nil { + return err + } + + patchOpts := &PatchOptions{} + result := o.Patch(patch.Type()). + NamespaceIfScoped(o.GetNamespace(), o.isNamespaced()). + Resource(o.resource()). + Name(o.GetName()). + SubResource("status"). + Body(data). + VersionedParams(patchOpts.ApplyOptions(opts).AsPatchOptions(), uc.paramCodec). + Do(ctx). + Into(u) + + u.SetGroupVersionKind(gvk) + return result +} diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/client/watch.go b/vendor/sigs.k8s.io/controller-runtime/pkg/client/watch.go new file mode 100644 index 000000000..765ca5daa --- /dev/null +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/client/watch.go @@ -0,0 +1,118 @@ +/* +Copyright 2020 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
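A brief usage sketch for the unstructured path: when a `*unstructured.Unstructured` is handed to the generic client, the calls above are routed through `unstructuredClient`, and the caller must set the group/version/kind up front. The helper below uses the project's `DNSEntry` kind purely as an example; the function itself is hypothetical.

```go
package example

import (
	"context"

	"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
	"k8s.io/apimachinery/pkg/runtime/schema"
	"sigs.k8s.io/controller-runtime/pkg/client"
)

// getDNSEntryUnstructured fetches a DNSEntry as an unstructured object. The
// client preserves the GroupVersionKind across the Get call, as shown above.
func getDNSEntryUnstructured(ctx context.Context, c client.Client, namespace, name string) (*unstructured.Unstructured, error) {
	u := &unstructured.Unstructured{}
	u.SetGroupVersionKind(schema.GroupVersionKind{
		Group:   "dns.gardener.cloud",
		Version: "v1alpha1",
		Kind:    "DNSEntry",
	})
	if err := c.Get(ctx, client.ObjectKey{Namespace: namespace, Name: name}, u); err != nil {
		return nil, err
	}
	return u, nil
}
```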
+*/ + +package client + +import ( + "context" + "strings" + + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" + "k8s.io/apimachinery/pkg/watch" + "k8s.io/client-go/dynamic" + "k8s.io/client-go/rest" +) + +// NewWithWatch returns a new WithWatch. +func NewWithWatch(config *rest.Config, options Options) (WithWatch, error) { + client, err := newClient(config, options) + if err != nil { + return nil, err + } + dynamicClient, err := dynamic.NewForConfig(config) + if err != nil { + return nil, err + } + return &watchingClient{client: client, dynamic: dynamicClient}, nil +} + +type watchingClient struct { + *client + dynamic dynamic.Interface +} + +func (w *watchingClient) Watch(ctx context.Context, list ObjectList, opts ...ListOption) (watch.Interface, error) { + switch l := list.(type) { + case *unstructured.UnstructuredList: + return w.unstructuredWatch(ctx, l, opts...) + case *metav1.PartialObjectMetadataList: + return w.metadataWatch(ctx, l, opts...) + default: + return w.typedWatch(ctx, l, opts...) + } +} + +func (w *watchingClient) listOpts(opts ...ListOption) ListOptions { + listOpts := ListOptions{} + listOpts.ApplyOptions(opts) + if listOpts.Raw == nil { + listOpts.Raw = &metav1.ListOptions{} + } + listOpts.Raw.Watch = true + + return listOpts +} + +func (w *watchingClient) metadataWatch(ctx context.Context, obj *metav1.PartialObjectMetadataList, opts ...ListOption) (watch.Interface, error) { + gvk := obj.GroupVersionKind() + if strings.HasSuffix(gvk.Kind, "List") { + gvk.Kind = gvk.Kind[:len(gvk.Kind)-4] + } + + listOpts := w.listOpts(opts...) + + resInt, err := w.client.metadataClient.getResourceInterface(gvk, listOpts.Namespace) + if err != nil { + return nil, err + } + + return resInt.Watch(ctx, *listOpts.AsListOptions()) +} + +func (w *watchingClient) unstructuredWatch(ctx context.Context, obj *unstructured.UnstructuredList, opts ...ListOption) (watch.Interface, error) { + gvk := obj.GroupVersionKind() + if strings.HasSuffix(gvk.Kind, "List") { + gvk.Kind = gvk.Kind[:len(gvk.Kind)-4] + } + + r, err := w.client.unstructuredClient.cache.getResource(obj) + if err != nil { + return nil, err + } + + listOpts := w.listOpts(opts...) + + if listOpts.Namespace != "" && r.isNamespaced() { + return w.dynamic.Resource(r.mapping.Resource).Namespace(listOpts.Namespace).Watch(ctx, *listOpts.AsListOptions()) + } + return w.dynamic.Resource(r.mapping.Resource).Watch(ctx, *listOpts.AsListOptions()) +} + +func (w *watchingClient) typedWatch(ctx context.Context, obj ObjectList, opts ...ListOption) (watch.Interface, error) { + r, err := w.client.typedClient.cache.getResource(obj) + if err != nil { + return nil, err + } + + listOpts := w.listOpts(opts...) + + return r.Get(). + NamespaceIfScoped(listOpts.Namespace, r.isNamespaced()). + Resource(r.resource()). + VersionedParams(listOpts.AsListOptions(), w.client.typedClient.paramCodec). + Watch(ctx) +} diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/conversion/conversion.go b/vendor/sigs.k8s.io/controller-runtime/pkg/conversion/conversion.go new file mode 100644 index 000000000..da32ab48e --- /dev/null +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/conversion/conversion.go @@ -0,0 +1,40 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
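A possible consumer of `NewWithWatch`, sketched under the assumption that a valid `*rest.Config` is available; the function name and the choice of `ConfigMapList` are illustrative only.

```go
package example

import (
	"context"
	"fmt"

	corev1 "k8s.io/api/core/v1"
	"k8s.io/client-go/rest"
	"sigs.k8s.io/controller-runtime/pkg/client"
)

// watchConfigMaps starts a watch in the given namespace and prints event types
// until the context is cancelled or the result channel closes.
func watchConfigMaps(ctx context.Context, cfg *rest.Config, namespace string) error {
	wc, err := client.NewWithWatch(cfg, client.Options{})
	if err != nil {
		return err
	}

	w, err := wc.Watch(ctx, &corev1.ConfigMapList{}, client.InNamespace(namespace))
	if err != nil {
		return err
	}
	defer w.Stop()

	for {
		select {
		case <-ctx.Done():
			return ctx.Err()
		case ev, ok := <-w.ResultChan():
			if !ok {
				return nil
			}
			fmt.Printf("%s %T\n", ev.Type, ev.Object)
		}
	}
}
```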
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +/* +Package conversion provides interface definitions that an API Type needs to +implement for it to be supported by the generic conversion webhook handler +defined under pkg/webhook/conversion. +*/ +package conversion + +import "k8s.io/apimachinery/pkg/runtime" + +// Convertible defines capability of a type to convertible i.e. it can be converted to/from a hub type. +type Convertible interface { + runtime.Object + ConvertTo(dst Hub) error + ConvertFrom(src Hub) error +} + +// Hub marks that a given type is the hub type for conversion. This means that +// all conversions will first convert to the hub type, then convert from the hub +// type to the destination type. All types besides the hub type should implement +// Convertible. +type Hub interface { + runtime.Object + Hub() +} diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/envtest/crd.go b/vendor/sigs.k8s.io/controller-runtime/pkg/envtest/crd.go new file mode 100644 index 000000000..a15c1daca --- /dev/null +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/envtest/crd.go @@ -0,0 +1,451 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package envtest + +import ( + "bufio" + "bytes" + "context" + "fmt" + "io" + "io/ioutil" + "os" + "path/filepath" + "time" + + apiextensionsv1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1" + "k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset" + apierrors "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/apimachinery/pkg/util/sets" + "k8s.io/apimachinery/pkg/util/wait" + k8syaml "k8s.io/apimachinery/pkg/util/yaml" + "k8s.io/client-go/kubernetes/scheme" + "k8s.io/client-go/rest" + "k8s.io/client-go/util/retry" + "k8s.io/utils/pointer" + "sigs.k8s.io/yaml" + + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/webhook/conversion" +) + +// CRDInstallOptions are the options for installing CRDs. +type CRDInstallOptions struct { + // Scheme is used to determine if conversion webhooks should be enabled + // for a particular CRD / object. + // + // Conversion webhooks are going to be enabled if an object in the scheme + // implements Hub and Spoke conversions. + // + // If nil, scheme.Scheme is used. 
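To make the `Hub`/`Convertible` contract concrete, here is a minimal, hypothetical pair of API versions. The `Widget` types, their fields, and the shallow `DeepCopyObject` implementations are invented for the sketch; real API types would use generated deepcopy code and live in separate version packages.

```go
package example

import (
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/runtime"
	"sigs.k8s.io/controller-runtime/pkg/conversion"
)

// WidgetV1 stands in for the storage ("hub") version of a hypothetical API type.
type WidgetV1 struct {
	metav1.TypeMeta   `json:",inline"`
	metav1.ObjectMeta `json:"metadata,omitempty"`

	Replicas int32 `json:"replicas"`
}

// Hub marks WidgetV1 as the conversion hub.
func (*WidgetV1) Hub() {}

// DeepCopyObject is a shallow stand-in for generated deepcopy code.
func (in *WidgetV1) DeepCopyObject() runtime.Object {
	out := *in
	return &out
}

// WidgetV1alpha1 stands in for an older ("spoke") version that converts via the hub.
type WidgetV1alpha1 struct {
	metav1.TypeMeta   `json:",inline"`
	metav1.ObjectMeta `json:"metadata,omitempty"`

	Count int32 `json:"count"`
}

// DeepCopyObject is a shallow stand-in for generated deepcopy code.
func (in *WidgetV1alpha1) DeepCopyObject() runtime.Object {
	out := *in
	return &out
}

// ConvertTo converts this spoke into the hub version.
func (in *WidgetV1alpha1) ConvertTo(dst conversion.Hub) error {
	hub := dst.(*WidgetV1)
	hub.ObjectMeta = in.ObjectMeta
	hub.Replicas = in.Count
	return nil
}

// ConvertFrom populates this spoke from the hub version.
func (in *WidgetV1alpha1) ConvertFrom(src conversion.Hub) error {
	hub := src.(*WidgetV1)
	in.ObjectMeta = hub.ObjectMeta
	in.Count = hub.Replicas
	return nil
}

// Compile-time checks that the interfaces above are satisfied.
var (
	_ conversion.Hub         = &WidgetV1{}
	_ conversion.Convertible = &WidgetV1alpha1{}
)
```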
+ Scheme *runtime.Scheme + + // Paths is a list of paths to the directories or files containing CRDs + Paths []string + + // CRDs is a list of CRDs to install + CRDs []*apiextensionsv1.CustomResourceDefinition + + // ErrorIfPathMissing will cause an error if a Path does not exist + ErrorIfPathMissing bool + + // MaxTime is the max time to wait + MaxTime time.Duration + + // PollInterval is the interval to check + PollInterval time.Duration + + // CleanUpAfterUse will cause the CRDs listed for installation to be + // uninstalled when terminating the test environment. + // Defaults to false. + CleanUpAfterUse bool + + // WebhookOptions contains the conversion webhook information to install + // on the CRDs. This field is usually inherited by the EnvTest options. + // + // If you're passing this field manually, you need to make sure that + // the CA information and host port is filled in properly. + WebhookOptions WebhookInstallOptions +} + +const defaultPollInterval = 100 * time.Millisecond +const defaultMaxWait = 10 * time.Second + +// InstallCRDs installs a collection of CRDs into a cluster by reading the crd yaml files from a directory. +func InstallCRDs(config *rest.Config, options CRDInstallOptions) ([]*apiextensionsv1.CustomResourceDefinition, error) { + defaultCRDOptions(&options) + + // Read the CRD yamls into options.CRDs + if err := readCRDFiles(&options); err != nil { + return nil, fmt.Errorf("unable to read CRD files: %w", err) + } + + if err := modifyConversionWebhooks(options.CRDs, options.Scheme, options.WebhookOptions); err != nil { + return nil, err + } + + // Create the CRDs in the apiserver + if err := CreateCRDs(config, options.CRDs); err != nil { + return options.CRDs, fmt.Errorf("unable to create CRD instances: %w", err) + } + + // Wait for the CRDs to appear as Resources in the apiserver + if err := WaitForCRDs(config, options.CRDs, options); err != nil { + return options.CRDs, fmt.Errorf("something went wrong waiting for CRDs to appear as API resources: %w", err) + } + + return options.CRDs, nil +} + +// readCRDFiles reads the directories of CRDs in options.Paths and adds the CRD structs to options.CRDs. +func readCRDFiles(options *CRDInstallOptions) error { + if len(options.Paths) > 0 { + crdList, err := renderCRDs(options) + if err != nil { + return err + } + + options.CRDs = append(options.CRDs, crdList...) + } + return nil +} + +// defaultCRDOptions sets the default values for CRDs. +func defaultCRDOptions(o *CRDInstallOptions) { + if o.Scheme == nil { + o.Scheme = scheme.Scheme + } + if o.MaxTime == 0 { + o.MaxTime = defaultMaxWait + } + if o.PollInterval == 0 { + o.PollInterval = defaultPollInterval + } +} + +// WaitForCRDs waits for the CRDs to appear in discovery. 
+func WaitForCRDs(config *rest.Config, crds []*apiextensionsv1.CustomResourceDefinition, options CRDInstallOptions) error { + // Add each CRD to a map of GroupVersion to Resource + waitingFor := map[schema.GroupVersion]*sets.String{} + for _, crd := range crds { + gvs := []schema.GroupVersion{} + for _, version := range crd.Spec.Versions { + if version.Served { + gvs = append(gvs, schema.GroupVersion{Group: crd.Spec.Group, Version: version.Name}) + } + } + + for _, gv := range gvs { + log.V(1).Info("adding API in waitlist", "GV", gv) + if _, found := waitingFor[gv]; !found { + // Initialize the set + waitingFor[gv] = &sets.String{} + } + // Add the Resource + waitingFor[gv].Insert(crd.Spec.Names.Plural) + } + } + + // Poll until all resources are found in discovery + p := &poller{config: config, waitingFor: waitingFor} + return wait.PollImmediate(options.PollInterval, options.MaxTime, p.poll) +} + +// poller checks if all the resources have been found in discovery, and returns false if not. +type poller struct { + // config is used to get discovery + config *rest.Config + + // waitingFor is the map of resources keyed by group version that have not yet been found in discovery + waitingFor map[schema.GroupVersion]*sets.String +} + +// poll checks if all the resources have been found in discovery, and returns false if not. +func (p *poller) poll() (done bool, err error) { + // Create a new clientset to avoid any client caching of discovery + cs, err := clientset.NewForConfig(p.config) + if err != nil { + return false, err + } + + allFound := true + for gv, resources := range p.waitingFor { + // All resources found, do nothing + if resources.Len() == 0 { + delete(p.waitingFor, gv) + continue + } + + // Get the Resources for this GroupVersion + // TODO: Maybe the controller-runtime client should be able to do this... + resourceList, err := cs.Discovery().ServerResourcesForGroupVersion(gv.Group + "/" + gv.Version) + if err != nil { + return false, nil //nolint:nilerr + } + + // Remove each found resource from the resources set that we are waiting for + for _, resource := range resourceList.APIResources { + resources.Delete(resource.Name) + } + + // Still waiting on some resources in this group version + if resources.Len() != 0 { + allFound = false + } + } + return allFound, nil +} + +// UninstallCRDs uninstalls a collection of CRDs by reading the crd yaml files from a directory. +func UninstallCRDs(config *rest.Config, options CRDInstallOptions) error { + // Read the CRD yamls into options.CRDs + if err := readCRDFiles(&options); err != nil { + return err + } + + // Delete the CRDs from the apiserver + cs, err := client.New(config, client.Options{}) + if err != nil { + return err + } + + // Uninstall each CRD + for _, crd := range options.CRDs { + crd := crd + log.V(1).Info("uninstalling CRD", "crd", crd.GetName()) + if err := cs.Delete(context.TODO(), crd); err != nil { + // If CRD is not found, we can consider success + if !apierrors.IsNotFound(err) { + return err + } + } + } + + return nil +} + +// CreateCRDs creates the CRDs. 
+func CreateCRDs(config *rest.Config, crds []*apiextensionsv1.CustomResourceDefinition) error { + cs, err := client.New(config, client.Options{}) + if err != nil { + return fmt.Errorf("unable to create client: %w", err) + } + + // Create each CRD + for _, crd := range crds { + crd := crd + log.V(1).Info("installing CRD", "crd", crd.GetName()) + existingCrd := crd.DeepCopy() + err := cs.Get(context.TODO(), client.ObjectKey{Name: crd.GetName()}, existingCrd) + switch { + case apierrors.IsNotFound(err): + if err := cs.Create(context.TODO(), crd); err != nil { + return fmt.Errorf("unable to create CRD %q: %w", crd.GetName(), err) + } + case err != nil: + return fmt.Errorf("unable to get CRD %q to check if it exists: %w", crd.GetName(), err) + default: + log.V(1).Info("CRD already exists, updating", "crd", crd.GetName()) + if err := retry.RetryOnConflict(retry.DefaultBackoff, func() error { + if err := cs.Get(context.TODO(), client.ObjectKey{Name: crd.GetName()}, existingCrd); err != nil { + return err + } + crd.SetResourceVersion(existingCrd.GetResourceVersion()) + return cs.Update(context.TODO(), crd) + }); err != nil { + return err + } + } + } + return nil +} + +// renderCRDs iterate through options.Paths and extract all CRD files. +func renderCRDs(options *CRDInstallOptions) ([]*apiextensionsv1.CustomResourceDefinition, error) { + var ( + err error + info os.FileInfo + files []os.FileInfo + ) + + type GVKN struct { + GVK schema.GroupVersionKind + Name string + } + + crds := map[GVKN]*apiextensionsv1.CustomResourceDefinition{} + + for _, path := range options.Paths { + var filePath = path + + // Return the error if ErrorIfPathMissing exists + if info, err = os.Stat(path); os.IsNotExist(err) { + if options.ErrorIfPathMissing { + return nil, err + } + continue + } + + if !info.IsDir() { + filePath, files = filepath.Dir(path), []os.FileInfo{info} + } else if files, err = ioutil.ReadDir(path); err != nil { + return nil, err + } + + log.V(1).Info("reading CRDs from path", "path", path) + crdList, err := readCRDs(filePath, files) + if err != nil { + return nil, err + } + + for i, crd := range crdList { + gvkn := GVKN{GVK: crd.GroupVersionKind(), Name: crd.GetName()} + if _, found := crds[gvkn]; found { + // Currently, we only print a log when there are duplicates. We may want to error out if that makes more sense. + log.Info("there are more than one CRD definitions with the same ", "GVKN", gvkn) + } + // We always use the CRD definition that we found last. + crds[gvkn] = crdList[i] + } + } + + // Converting map to a list to return + res := []*apiextensionsv1.CustomResourceDefinition{} + for _, obj := range crds { + res = append(res, obj) + } + return res, nil +} + +// modifyConversionWebhooks takes all the registered CustomResourceDefinitions and applies modifications +// to conditionally enable webhooks if the type is registered within the scheme. +func modifyConversionWebhooks(crds []*apiextensionsv1.CustomResourceDefinition, scheme *runtime.Scheme, webhookOptions WebhookInstallOptions) error { + if len(webhookOptions.LocalServingCAData) == 0 { + return nil + } + + // Determine all registered convertible types. + convertibles := map[schema.GroupKind]struct{}{} + for gvk := range scheme.AllKnownTypes() { + obj, err := scheme.New(gvk) + if err != nil { + return err + } + if ok, err := conversion.IsConvertible(scheme, obj); ok && err == nil { + convertibles[gvk.GroupKind()] = struct{}{} + } + } + + // generate host port. 
+ hostPort, err := webhookOptions.generateHostPort() + if err != nil { + return err + } + url := pointer.StringPtr(fmt.Sprintf("https://%s/convert", hostPort)) + + for i := range crds { + // Continue if we're preserving unknown fields. + if crds[i].Spec.PreserveUnknownFields { + continue + } + // Continue if the GroupKind isn't registered as being convertible. + if _, ok := convertibles[schema.GroupKind{ + Group: crds[i].Spec.Group, + Kind: crds[i].Spec.Names.Kind, + }]; !ok { + continue + } + if crds[i].Spec.Conversion == nil { + crds[i].Spec.Conversion = &apiextensionsv1.CustomResourceConversion{ + Webhook: &apiextensionsv1.WebhookConversion{}, + } + } + crds[i].Spec.Conversion.Strategy = apiextensionsv1.WebhookConverter + crds[i].Spec.Conversion.Webhook.ConversionReviewVersions = []string{"v1", "v1beta1"} + crds[i].Spec.Conversion.Webhook.ClientConfig = &apiextensionsv1.WebhookClientConfig{ + Service: nil, + URL: url, + CABundle: webhookOptions.LocalServingCAData, + } + } + + return nil +} + +// readCRDs reads the CRDs from files and Unmarshals them into structs. +func readCRDs(basePath string, files []os.FileInfo) ([]*apiextensionsv1.CustomResourceDefinition, error) { + var crds []*apiextensionsv1.CustomResourceDefinition + + // White list the file extensions that may contain CRDs + crdExts := sets.NewString(".json", ".yaml", ".yml") + + for _, file := range files { + // Only parse allowlisted file types + if !crdExts.Has(filepath.Ext(file.Name())) { + continue + } + + // Unmarshal CRDs from file into structs + docs, err := readDocuments(filepath.Join(basePath, file.Name())) + if err != nil { + return nil, err + } + + for _, doc := range docs { + crd := &apiextensionsv1.CustomResourceDefinition{} + if err = yaml.Unmarshal(doc, crd); err != nil { + return nil, err + } + + if crd.Kind != "CustomResourceDefinition" || crd.Spec.Names.Kind == "" || crd.Spec.Group == "" { + continue + } + crds = append(crds, crd) + } + + log.V(1).Info("read CRDs from file", "file", file.Name()) + } + return crds, nil +} + +// readDocuments reads documents from file. +func readDocuments(fp string) ([][]byte, error) { + b, err := ioutil.ReadFile(fp) //nolint:gosec + if err != nil { + return nil, err + } + + docs := [][]byte{} + reader := k8syaml.NewYAMLReader(bufio.NewReader(bytes.NewReader(b))) + for { + // Read document + doc, err := reader.Read() + if err != nil { + if err == io.EOF { + break + } + + return nil, err + } + + docs = append(docs, doc) + } + + return docs, nil +} diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/envtest/doc.go b/vendor/sigs.k8s.io/controller-runtime/pkg/envtest/doc.go new file mode 100644 index 000000000..412e794cc --- /dev/null +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/envtest/doc.go @@ -0,0 +1,26 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
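A short sketch of calling `InstallCRDs` directly, assuming a reachable API server config and a directory of CRD manifests; the wrapper function and parameter names are assumptions.

```go
package example

import (
	apiextensionsv1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1"
	"k8s.io/client-go/rest"
	"sigs.k8s.io/controller-runtime/pkg/envtest"
)

// installCRDsFrom installs every CRD manifest found below crdDir and blocks
// until the new resources show up in API discovery.
func installCRDsFrom(cfg *rest.Config, crdDir string) ([]*apiextensionsv1.CustomResourceDefinition, error) {
	return envtest.InstallCRDs(cfg, envtest.CRDInstallOptions{
		Paths:              []string{crdDir},
		ErrorIfPathMissing: true,
	})
}
```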
+*/ + +// Package envtest provides libraries for integration testing by starting a local control plane +// +// Control plane binaries (etcd and kube-apiserver) are loaded by default from +// /usr/local/kubebuilder/bin. This can be overridden by setting the +// KUBEBUILDER_ASSETS environment variable, or by directly creating a +// ControlPlane for the Environment to use. +// +// Environment can also be configured to work with an existing cluster, and +// simply load CRDs and provide client configuration. +package envtest diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/envtest/helper.go b/vendor/sigs.k8s.io/controller-runtime/pkg/envtest/helper.go new file mode 100644 index 000000000..d3b52017d --- /dev/null +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/envtest/helper.go @@ -0,0 +1,69 @@ +/* +Copyright 2021 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package envtest + +import ( + apiextensionsv1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1" + "k8s.io/client-go/kubernetes/scheme" +) + +var ( + crdScheme = scheme.Scheme +) + +// init is required to correctly initialize the crdScheme package variable. +func init() { + _ = apiextensionsv1.AddToScheme(crdScheme) +} + +// mergePaths merges two string slices containing paths. +// This function makes no guarantees about order of the merged slice. +func mergePaths(s1, s2 []string) []string { + m := make(map[string]struct{}) + for _, s := range s1 { + m[s] = struct{}{} + } + for _, s := range s2 { + m[s] = struct{}{} + } + merged := make([]string, len(m)) + i := 0 + for key := range m { + merged[i] = key + i++ + } + return merged +} + +// mergeCRDs merges two CRD slices using their names. +// This function makes no guarantees about order of the merged slice. +func mergeCRDs(s1, s2 []*apiextensionsv1.CustomResourceDefinition) []*apiextensionsv1.CustomResourceDefinition { + m := make(map[string]*apiextensionsv1.CustomResourceDefinition) + for _, obj := range s1 { + m[obj.GetName()] = obj + } + for _, obj := range s2 { + m[obj.GetName()] = obj + } + merged := make([]*apiextensionsv1.CustomResourceDefinition, len(m)) + i := 0 + for _, obj := range m { + merged[i] = obj.DeepCopy() + i++ + } + return merged +} diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/envtest/server.go b/vendor/sigs.k8s.io/controller-runtime/pkg/envtest/server.go new file mode 100644 index 000000000..5347f074d --- /dev/null +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/envtest/server.go @@ -0,0 +1,375 @@ +/* +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+See the License for the specific language governing permissions and +limitations under the License. +*/ + +package envtest + +import ( + "fmt" + "os" + "strings" + "time" + + apiextensionsv1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/client-go/kubernetes/scheme" + "k8s.io/client-go/rest" + + "sigs.k8s.io/controller-runtime/pkg/client/config" + logf "sigs.k8s.io/controller-runtime/pkg/internal/log" + "sigs.k8s.io/controller-runtime/pkg/internal/testing/controlplane" + "sigs.k8s.io/controller-runtime/pkg/internal/testing/process" +) + +var log = logf.RuntimeLog.WithName("test-env") + +/* +It's possible to override some defaults, by setting the following environment variables: + USE_EXISTING_CLUSTER (boolean): if set to true, envtest will use an existing cluster + TEST_ASSET_KUBE_APISERVER (string): path to the api-server binary to use + TEST_ASSET_ETCD (string): path to the etcd binary to use + TEST_ASSET_KUBECTL (string): path to the kubectl binary to use + KUBEBUILDER_ASSETS (string): directory containing the binaries to use (api-server, etcd and kubectl). Defaults to /usr/local/kubebuilder/bin. + KUBEBUILDER_CONTROLPLANE_START_TIMEOUT (string supported by time.ParseDuration): timeout for test control plane to start. Defaults to 20s. + KUBEBUILDER_CONTROLPLANE_STOP_TIMEOUT (string supported by time.ParseDuration): timeout for test control plane to start. Defaults to 20s. + KUBEBUILDER_ATTACH_CONTROL_PLANE_OUTPUT (boolean): if set to true, the control plane's stdout and stderr are attached to os.Stdout and os.Stderr + +*/ +const ( + envUseExistingCluster = "USE_EXISTING_CLUSTER" + envStartTimeout = "KUBEBUILDER_CONTROLPLANE_START_TIMEOUT" + envStopTimeout = "KUBEBUILDER_CONTROLPLANE_STOP_TIMEOUT" + envAttachOutput = "KUBEBUILDER_ATTACH_CONTROL_PLANE_OUTPUT" + StartTimeout = 60 + StopTimeout = 60 + + defaultKubebuilderControlPlaneStartTimeout = 20 * time.Second + defaultKubebuilderControlPlaneStopTimeout = 20 * time.Second +) + +// internal types we expose as part of our public API. +type ( + // ControlPlane is the re-exported ControlPlane type from the internal testing package. + ControlPlane = controlplane.ControlPlane + + // APIServer is the re-exported APIServer from the internal testing package. + APIServer = controlplane.APIServer + + // Etcd is the re-exported Etcd from the internal testing package. + Etcd = controlplane.Etcd + + // User represents a Kubernetes user to provision for auth purposes. + User = controlplane.User + + // AuthenticatedUser represets a Kubernetes user that's been provisioned. + AuthenticatedUser = controlplane.AuthenticatedUser + + // ListenAddr indicates the address and port that the API server should listen on. + ListenAddr = process.ListenAddr + + // SecureServing contains details describing how the API server should serve + // its secure endpoint. + SecureServing = controlplane.SecureServing + + // Authn is an authentication method that can be used with the control plane to + // provision users. + Authn = controlplane.Authn + + // Arguments allows configuring a process's flags. + Arguments = process.Arguments + + // Arg is a single flag with one or more values. + Arg = process.Arg +) + +var ( + // EmptyArguments constructs a new set of flags with nothing set. + // + // This is mostly useful for testing helper methods -- you'll want to call + // Configure on the APIServer (or etcd) to configure their arguments. 
+ EmptyArguments = process.EmptyArguments +) + +// Environment creates a Kubernetes test environment that will start / stop the Kubernetes control plane and +// install extension APIs. +type Environment struct { + // ControlPlane is the ControlPlane including the apiserver and etcd + ControlPlane controlplane.ControlPlane + + // Scheme is used to determine if conversion webhooks should be enabled + // for a particular CRD / object. + // + // Conversion webhooks are going to be enabled if an object in the scheme + // implements Hub and Spoke conversions. + // + // If nil, scheme.Scheme is used. + Scheme *runtime.Scheme + + // Config can be used to talk to the apiserver. It's automatically + // populated if not set using the standard controller-runtime config + // loading. + Config *rest.Config + + // CRDInstallOptions are the options for installing CRDs. + CRDInstallOptions CRDInstallOptions + + // WebhookInstallOptions are the options for installing webhooks. + WebhookInstallOptions WebhookInstallOptions + + // ErrorIfCRDPathMissing provides an interface for the underlying + // CRDInstallOptions.ErrorIfPathMissing. It prevents silent failures + // for missing CRD paths. + ErrorIfCRDPathMissing bool + + // CRDs is a list of CRDs to install. + // If both this field and CRDs field in CRDInstallOptions are specified, the + // values are merged. + CRDs []*apiextensionsv1.CustomResourceDefinition + + // CRDDirectoryPaths is a list of paths containing CRD yaml or json configs. + // If both this field and Paths field in CRDInstallOptions are specified, the + // values are merged. + CRDDirectoryPaths []string + + // BinaryAssetsDirectory is the path where the binaries required for the envtest are + // located in the local environment. This field can be overridden by setting KUBEBUILDER_ASSETS. + BinaryAssetsDirectory string + + // UseExistingCluster indicates that this environments should use an + // existing kubeconfig, instead of trying to stand up a new control plane. + // This is useful in cases that need aggregated API servers and the like. + UseExistingCluster *bool + + // ControlPlaneStartTimeout is the maximum duration each controlplane component + // may take to start. It defaults to the KUBEBUILDER_CONTROLPLANE_START_TIMEOUT + // environment variable or 20 seconds if unspecified + ControlPlaneStartTimeout time.Duration + + // ControlPlaneStopTimeout is the maximum duration each controlplane component + // may take to stop. It defaults to the KUBEBUILDER_CONTROLPLANE_STOP_TIMEOUT + // environment variable or 20 seconds if unspecified + ControlPlaneStopTimeout time.Duration + + // KubeAPIServerFlags is the set of flags passed while starting the api server. + // + // Deprecated: use ControlPlane.GetAPIServer().Configure() instead. + KubeAPIServerFlags []string + + // AttachControlPlaneOutput indicates if control plane output will be attached to os.Stdout and os.Stderr. + // Enable this to get more visibility of the testing control plane. + // It respect KUBEBUILDER_ATTACH_CONTROL_PLANE_OUTPUT environment variable. + AttachControlPlaneOutput bool +} + +// Stop stops a running server. +// Previously installed CRDs, as listed in CRDInstallOptions.CRDs, will be uninstalled +// if CRDInstallOptions.CleanUpAfterUse are set to true. 
+func (te *Environment) Stop() error { + if te.CRDInstallOptions.CleanUpAfterUse { + if err := UninstallCRDs(te.Config, te.CRDInstallOptions); err != nil { + return err + } + } + + if err := te.WebhookInstallOptions.Cleanup(); err != nil { + return err + } + + if te.useExistingCluster() { + return nil + } + + return te.ControlPlane.Stop() +} + +// Start starts a local Kubernetes server and updates te.ApiserverPort with the port it is listening on. +func (te *Environment) Start() (*rest.Config, error) { + if te.useExistingCluster() { + log.V(1).Info("using existing cluster") + if te.Config == nil { + // we want to allow people to pass in their own config, so + // only load a config if it hasn't already been set. + log.V(1).Info("automatically acquiring client configuration") + + var err error + te.Config, err = config.GetConfig() + if err != nil { + return nil, fmt.Errorf("unable to get configuration for existing cluster: %w", err) + } + } + } else { + apiServer := te.ControlPlane.GetAPIServer() + if len(apiServer.Args) == 0 { //nolint:staticcheck + // pass these through separately from above in case something like + // AddUser defaults APIServer. + // + // TODO(directxman12): if/when we feel like making a bigger + // breaking change here, just make APIServer and Etcd non-pointers + // in ControlPlane. + + // NB(directxman12): we still pass these in so that things work if the + // user manually specifies them, but in most cases we expect them to + // be nil so that we use the new .Configure() logic. + apiServer.Args = te.KubeAPIServerFlags //nolint:staticcheck + } + if te.ControlPlane.Etcd == nil { + te.ControlPlane.Etcd = &controlplane.Etcd{} + } + + if os.Getenv(envAttachOutput) == "true" { + te.AttachControlPlaneOutput = true + } + if apiServer.Out == nil && te.AttachControlPlaneOutput { + apiServer.Out = os.Stdout + } + if apiServer.Err == nil && te.AttachControlPlaneOutput { + apiServer.Err = os.Stderr + } + if te.ControlPlane.Etcd.Out == nil && te.AttachControlPlaneOutput { + te.ControlPlane.Etcd.Out = os.Stdout + } + if te.ControlPlane.Etcd.Err == nil && te.AttachControlPlaneOutput { + te.ControlPlane.Etcd.Err = os.Stderr + } + + apiServer.Path = process.BinPathFinder("kube-apiserver", te.BinaryAssetsDirectory) + te.ControlPlane.Etcd.Path = process.BinPathFinder("etcd", te.BinaryAssetsDirectory) + te.ControlPlane.KubectlPath = process.BinPathFinder("kubectl", te.BinaryAssetsDirectory) + + if err := te.defaultTimeouts(); err != nil { + return nil, fmt.Errorf("failed to default controlplane timeouts: %w", err) + } + te.ControlPlane.Etcd.StartTimeout = te.ControlPlaneStartTimeout + te.ControlPlane.Etcd.StopTimeout = te.ControlPlaneStopTimeout + apiServer.StartTimeout = te.ControlPlaneStartTimeout + apiServer.StopTimeout = te.ControlPlaneStopTimeout + + log.V(1).Info("starting control plane") + if err := te.startControlPlane(); err != nil { + return nil, fmt.Errorf("unable to start control plane itself: %w", err) + } + + // Create the *rest.Config for creating new clients + baseConfig := &rest.Config{ + // gotta go fast during tests -- we don't really care about overwhelming our test API server + QPS: 1000.0, + Burst: 2000.0, + } + + adminInfo := User{Name: "admin", Groups: []string{"system:masters"}} + adminUser, err := te.ControlPlane.AddUser(adminInfo, baseConfig) + if err != nil { + return te.Config, fmt.Errorf("unable to provision admin user: %w", err) + } + te.Config = adminUser.Config() + } + + // Set the default scheme if nil. 
+ if te.Scheme == nil { + te.Scheme = scheme.Scheme + } + + // Call PrepWithoutInstalling to setup certificates first + // and have them available to patch CRD conversion webhook as well. + if err := te.WebhookInstallOptions.PrepWithoutInstalling(); err != nil { + return nil, err + } + + log.V(1).Info("installing CRDs") + te.CRDInstallOptions.CRDs = mergeCRDs(te.CRDInstallOptions.CRDs, te.CRDs) + te.CRDInstallOptions.Paths = mergePaths(te.CRDInstallOptions.Paths, te.CRDDirectoryPaths) + te.CRDInstallOptions.ErrorIfPathMissing = te.ErrorIfCRDPathMissing + te.CRDInstallOptions.WebhookOptions = te.WebhookInstallOptions + crds, err := InstallCRDs(te.Config, te.CRDInstallOptions) + if err != nil { + return te.Config, fmt.Errorf("unable to install CRDs onto control plane: %w", err) + } + te.CRDs = crds + + log.V(1).Info("installing webhooks") + if err := te.WebhookInstallOptions.Install(te.Config); err != nil { + return nil, fmt.Errorf("unable to install webhooks onto control plane: %w", err) + } + return te.Config, nil +} + +// AddUser provisions a new user for connecting to this Environment. The user will +// have the specified name & belong to the specified groups. +// +// If you specify a "base" config, the returned REST Config will contain those +// settings as well as any required by the authentication method. You can use +// this to easily specify options like QPS. +// +// This is effectively a convinience alias for ControlPlane.AddUser -- see that +// for more low-level details. +func (te *Environment) AddUser(user User, baseConfig *rest.Config) (*AuthenticatedUser, error) { + return te.ControlPlane.AddUser(user, baseConfig) +} + +func (te *Environment) startControlPlane() error { + numTries, maxRetries := 0, 5 + var err error + for ; numTries < maxRetries; numTries++ { + // Start the control plane - retry if it fails + err = te.ControlPlane.Start() + if err == nil { + break + } + log.Error(err, "unable to start the controlplane", "tries", numTries) + } + if numTries == maxRetries { + return fmt.Errorf("failed to start the controlplane. retried %d times: %w", numTries, err) + } + return nil +} + +func (te *Environment) defaultTimeouts() error { + var err error + if te.ControlPlaneStartTimeout == 0 { + if envVal := os.Getenv(envStartTimeout); envVal != "" { + te.ControlPlaneStartTimeout, err = time.ParseDuration(envVal) + if err != nil { + return err + } + } else { + te.ControlPlaneStartTimeout = defaultKubebuilderControlPlaneStartTimeout + } + } + + if te.ControlPlaneStopTimeout == 0 { + if envVal := os.Getenv(envStopTimeout); envVal != "" { + te.ControlPlaneStopTimeout, err = time.ParseDuration(envVal) + if err != nil { + return err + } + } else { + te.ControlPlaneStopTimeout = defaultKubebuilderControlPlaneStopTimeout + } + } + return nil +} + +func (te *Environment) useExistingCluster() bool { + if te.UseExistingCluster == nil { + return strings.ToLower(os.Getenv(envUseExistingCluster)) == "true" + } + return *te.UseExistingCluster +} + +// DefaultKubeAPIServerFlags exposes the default args for the APIServer so that +// you can use those to append your own additional arguments. +// +// Deprecated: use APIServer.Configure() instead. 
+var DefaultKubeAPIServerFlags = controlplane.APIServerDefaultArgs //nolint:staticcheck diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/envtest/webhook.go b/vendor/sigs.k8s.io/controller-runtime/pkg/envtest/webhook.go new file mode 100644 index 000000000..8552d3ba6 --- /dev/null +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/envtest/webhook.go @@ -0,0 +1,428 @@ +/* +Copyright 2019 The Kubernetes Authors. +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + http://www.apache.org/licenses/LICENSE-2.0 +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package envtest + +import ( + "context" + "fmt" + "io/ioutil" + "net" + "os" + "path/filepath" + "time" + + admissionv1 "k8s.io/api/admissionregistration/v1" + apierrors "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/apimachinery/pkg/util/sets" + "k8s.io/apimachinery/pkg/util/wait" + "k8s.io/client-go/kubernetes/scheme" + "k8s.io/client-go/rest" + "sigs.k8s.io/controller-runtime/pkg/client/apiutil" + "sigs.k8s.io/yaml" + + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/internal/testing/addr" + "sigs.k8s.io/controller-runtime/pkg/internal/testing/certs" +) + +// WebhookInstallOptions are the options for installing mutating or validating webhooks. +type WebhookInstallOptions struct { + // Paths is a list of paths to the directories or files containing the mutating or validating webhooks yaml or json configs. + Paths []string + + // MutatingWebhooks is a list of MutatingWebhookConfigurations to install + MutatingWebhooks []*admissionv1.MutatingWebhookConfiguration + + // ValidatingWebhooks is a list of ValidatingWebhookConfigurations to install + ValidatingWebhooks []*admissionv1.ValidatingWebhookConfiguration + + // IgnoreErrorIfPathMissing will ignore an error if a DirectoryPath does not exist when set to true + IgnoreErrorIfPathMissing bool + + // LocalServingHost is the host for serving webhooks on. + // it will be automatically populated + LocalServingHost string + + // LocalServingPort is the allocated port for serving webhooks on. + // it will be automatically populated by a random available local port + LocalServingPort int + + // LocalServingCertDir is the allocated directory for serving certificates. + // it will be automatically populated by the local temp dir + LocalServingCertDir string + + // CAData is the CA that can be used to trust the serving certificates in LocalServingCertDir. + LocalServingCAData []byte + + // LocalServingHostExternalName is the hostname to use to reach the webhook server. + LocalServingHostExternalName string + + // MaxTime is the max time to wait + MaxTime time.Duration + + // PollInterval is the interval to check + PollInterval time.Duration +} + +// ModifyWebhookDefinitions modifies webhook definitions by: +// - applying CABundle based on the provided tinyca +// - if webhook client config uses service spec, it's removed and replaced with direct url. 
+func (o *WebhookInstallOptions) ModifyWebhookDefinitions() error { + caData := o.LocalServingCAData + + // generate host port. + hostPort, err := o.generateHostPort() + if err != nil { + return err + } + + for i := range o.MutatingWebhooks { + for j := range o.MutatingWebhooks[i].Webhooks { + updateClientConfig(&o.MutatingWebhooks[i].Webhooks[j].ClientConfig, hostPort, caData) + } + } + + for i := range o.ValidatingWebhooks { + for j := range o.ValidatingWebhooks[i].Webhooks { + updateClientConfig(&o.ValidatingWebhooks[i].Webhooks[j].ClientConfig, hostPort, caData) + } + } + return nil +} + +func updateClientConfig(cc *admissionv1.WebhookClientConfig, hostPort string, caData []byte) { + cc.CABundle = caData + if cc.Service != nil && cc.Service.Path != nil { + url := fmt.Sprintf("https://%s/%s", hostPort, *cc.Service.Path) + cc.URL = &url + cc.Service = nil + } +} + +func (o *WebhookInstallOptions) generateHostPort() (string, error) { + if o.LocalServingPort == 0 { + port, host, err := addr.Suggest(o.LocalServingHost) + if err != nil { + return "", fmt.Errorf("unable to grab random port for serving webhooks on: %v", err) + } + o.LocalServingPort = port + o.LocalServingHost = host + } + host := o.LocalServingHostExternalName + if host == "" { + host = o.LocalServingHost + } + return net.JoinHostPort(host, fmt.Sprintf("%d", o.LocalServingPort)), nil +} + +// PrepWithoutInstalling does the setup parts of Install (populating host-port, +// setting up CAs, etc), without actually truing to do anything with webhook +// definitions. This is largely useful for internal testing of +// controller-runtime, where we need a random host-port & caData for webhook +// tests, but may be useful in similar scenarios. +func (o *WebhookInstallOptions) PrepWithoutInstalling() error { + if err := o.setupCA(); err != nil { + return err + } + + if err := parseWebhook(o); err != nil { + return err + } + + return o.ModifyWebhookDefinitions() +} + +// Install installs specified webhooks to the API server. +func (o *WebhookInstallOptions) Install(config *rest.Config) error { + if len(o.LocalServingCAData) == 0 { + if err := o.PrepWithoutInstalling(); err != nil { + return err + } + } + + if err := createWebhooks(config, o.MutatingWebhooks, o.ValidatingWebhooks); err != nil { + return err + } + + return WaitForWebhooks(config, o.MutatingWebhooks, o.ValidatingWebhooks, *o) +} + +// Cleanup cleans up cert directories. +func (o *WebhookInstallOptions) Cleanup() error { + if o.LocalServingCertDir != "" { + return os.RemoveAll(o.LocalServingCertDir) + } + return nil +} + +// WaitForWebhooks waits for the Webhooks to be available through API server. 
+func WaitForWebhooks(config *rest.Config, + mutatingWebhooks []*admissionv1.MutatingWebhookConfiguration, + validatingWebhooks []*admissionv1.ValidatingWebhookConfiguration, + options WebhookInstallOptions) error { + waitingFor := map[schema.GroupVersionKind]*sets.String{} + + for _, hook := range mutatingWebhooks { + h := hook + gvk, err := apiutil.GVKForObject(h, scheme.Scheme) + if err != nil { + return fmt.Errorf("unable to get gvk for MutatingWebhookConfiguration %s: %v", hook.GetName(), err) + } + + if _, ok := waitingFor[gvk]; !ok { + waitingFor[gvk] = &sets.String{} + } + waitingFor[gvk].Insert(h.GetName()) + } + + for _, hook := range validatingWebhooks { + h := hook + gvk, err := apiutil.GVKForObject(h, scheme.Scheme) + if err != nil { + return fmt.Errorf("unable to get gvk for ValidatingWebhookConfiguration %s: %v", hook.GetName(), err) + } + + if _, ok := waitingFor[gvk]; !ok { + waitingFor[gvk] = &sets.String{} + } + waitingFor[gvk].Insert(hook.GetName()) + } + + // Poll until all resources are found in discovery + p := &webhookPoller{config: config, waitingFor: waitingFor} + return wait.PollImmediate(options.PollInterval, options.MaxTime, p.poll) +} + +// poller checks if all the resources have been found in discovery, and returns false if not. +type webhookPoller struct { + // config is used to get discovery + config *rest.Config + + // waitingFor is the map of resources keyed by group version that have not yet been found in discovery + waitingFor map[schema.GroupVersionKind]*sets.String +} + +// poll checks if all the resources have been found in discovery, and returns false if not. +func (p *webhookPoller) poll() (done bool, err error) { + // Create a new clientset to avoid any client caching of discovery + c, err := client.New(p.config, client.Options{}) + if err != nil { + return false, err + } + + allFound := true + for gvk, names := range p.waitingFor { + if names.Len() == 0 { + delete(p.waitingFor, gvk) + continue + } + for _, name := range names.List() { + var obj = &unstructured.Unstructured{} + obj.SetGroupVersionKind(gvk) + err := c.Get(context.Background(), client.ObjectKey{ + Namespace: "", + Name: name, + }, obj) + + if err == nil { + names.Delete(name) + } + + if apierrors.IsNotFound(err) { + allFound = false + } + if err != nil { + return false, err + } + } + } + return allFound, nil +} + +// setupCA creates CA for testing and writes them to disk. +func (o *WebhookInstallOptions) setupCA() error { + hookCA, err := certs.NewTinyCA() + if err != nil { + return fmt.Errorf("unable to set up webhook CA: %v", err) + } + + names := []string{"localhost", o.LocalServingHost, o.LocalServingHostExternalName} + hookCert, err := hookCA.NewServingCert(names...) 
+ if err != nil { + return fmt.Errorf("unable to set up webhook serving certs: %v", err) + } + + localServingCertsDir, err := ioutil.TempDir("", "envtest-serving-certs-") + o.LocalServingCertDir = localServingCertsDir + if err != nil { + return fmt.Errorf("unable to create directory for webhook serving certs: %v", err) + } + + certData, keyData, err := hookCert.AsBytes() + if err != nil { + return fmt.Errorf("unable to marshal webhook serving certs: %v", err) + } + + if err := ioutil.WriteFile(filepath.Join(localServingCertsDir, "tls.crt"), certData, 0640); err != nil { //nolint:gosec + return fmt.Errorf("unable to write webhook serving cert to disk: %v", err) + } + if err := ioutil.WriteFile(filepath.Join(localServingCertsDir, "tls.key"), keyData, 0640); err != nil { //nolint:gosec + return fmt.Errorf("unable to write webhook serving key to disk: %v", err) + } + + o.LocalServingCAData = certData + return err +} + +func createWebhooks(config *rest.Config, mutHooks []*admissionv1.MutatingWebhookConfiguration, valHooks []*admissionv1.ValidatingWebhookConfiguration) error { + cs, err := client.New(config, client.Options{}) + if err != nil { + return err + } + + // Create each webhook + for _, hook := range mutHooks { + hook := hook + log.V(1).Info("installing mutating webhook", "webhook", hook.GetName()) + if err := ensureCreated(cs, hook); err != nil { + return err + } + } + for _, hook := range valHooks { + hook := hook + log.V(1).Info("installing validating webhook", "webhook", hook.GetName()) + if err := ensureCreated(cs, hook); err != nil { + return err + } + } + return nil +} + +// ensureCreated creates or update object if already exists in the cluster. +func ensureCreated(cs client.Client, obj client.Object) error { + existing := obj.DeepCopyObject().(client.Object) + err := cs.Get(context.Background(), client.ObjectKey{Name: obj.GetName()}, existing) + switch { + case apierrors.IsNotFound(err): + if err := cs.Create(context.Background(), obj); err != nil { + return err + } + case err != nil: + return err + default: + log.V(1).Info("Webhook configuration already exists, updating", "webhook", obj.GetName()) + obj.SetResourceVersion(existing.GetResourceVersion()) + if err := cs.Update(context.Background(), obj); err != nil { + return err + } + } + return nil +} + +// parseWebhook reads the directories or files of Webhooks in options.Paths and adds the Webhook structs to options. +func parseWebhook(options *WebhookInstallOptions) error { + if len(options.Paths) > 0 { + for _, path := range options.Paths { + _, err := os.Stat(path) + if options.IgnoreErrorIfPathMissing && os.IsNotExist(err) { + continue // skip this path + } + if !options.IgnoreErrorIfPathMissing && os.IsNotExist(err) { + return err // treat missing path as error + } + mutHooks, valHooks, err := readWebhooks(path) + if err != nil { + return err + } + options.MutatingWebhooks = append(options.MutatingWebhooks, mutHooks...) + options.ValidatingWebhooks = append(options.ValidatingWebhooks, valHooks...) + } + } + return nil +} + +// readWebhooks reads the Webhooks from files and Unmarshals them into structs +// returns slice of mutating and validating webhook configurations. 
+func readWebhooks(path string) ([]*admissionv1.MutatingWebhookConfiguration, []*admissionv1.ValidatingWebhookConfiguration, error) { + // Get the webhook files + var files []os.FileInfo + var err error + log.V(1).Info("reading Webhooks from path", "path", path) + info, err := os.Stat(path) + if err != nil { + return nil, nil, err + } + if !info.IsDir() { + path, files = filepath.Dir(path), []os.FileInfo{info} + } else if files, err = ioutil.ReadDir(path); err != nil { + return nil, nil, err + } + + // file extensions that may contain Webhooks + resourceExtensions := sets.NewString(".json", ".yaml", ".yml") + + var mutHooks []*admissionv1.MutatingWebhookConfiguration + var valHooks []*admissionv1.ValidatingWebhookConfiguration + for _, file := range files { + // Only parse allowlisted file types + if !resourceExtensions.Has(filepath.Ext(file.Name())) { + continue + } + + // Unmarshal Webhooks from file into structs + docs, err := readDocuments(filepath.Join(path, file.Name())) + if err != nil { + return nil, nil, err + } + + for _, doc := range docs { + var generic metav1.PartialObjectMetadata + if err = yaml.Unmarshal(doc, &generic); err != nil { + return nil, nil, err + } + + const ( + admissionregv1 = "admissionregistration.k8s.io/v1" + ) + switch { + case generic.Kind == "MutatingWebhookConfiguration": + if generic.APIVersion != admissionregv1 { + return nil, nil, fmt.Errorf("only v1 is supported right now for MutatingWebhookConfiguration (name: %s)", generic.Name) + } + hook := &admissionv1.MutatingWebhookConfiguration{} + if err := yaml.Unmarshal(doc, hook); err != nil { + return nil, nil, err + } + mutHooks = append(mutHooks, hook) + case generic.Kind == "ValidatingWebhookConfiguration": + if generic.APIVersion != admissionregv1 { + return nil, nil, fmt.Errorf("only v1 is supported right now for ValidatingWebhookConfiguration (name: %s)", generic.Name) + } + hook := &admissionv1.ValidatingWebhookConfiguration{} + if err := yaml.Unmarshal(doc, hook); err != nil { + return nil, nil, err + } + valHooks = append(valHooks, hook) + default: + continue + } + } + + log.V(1).Info("read webhooks from file", "file", file.Name()) + } + return mutHooks, valHooks, nil +} diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/internal/flock/doc.go b/vendor/sigs.k8s.io/controller-runtime/pkg/internal/flock/doc.go new file mode 100644 index 000000000..11e39823e --- /dev/null +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/internal/flock/doc.go @@ -0,0 +1,21 @@ +/* +Copyright 2021 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Package flock is copied from k8s.io/kubernetes/pkg/util/flock to avoid +// importing k8s.io/kubernetes as a dependency. +// +// Provides file locking functionalities on unix systems. 
+package flock diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/internal/flock/errors.go b/vendor/sigs.k8s.io/controller-runtime/pkg/internal/flock/errors.go new file mode 100644 index 000000000..ee7a43437 --- /dev/null +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/internal/flock/errors.go @@ -0,0 +1,24 @@ +/* +Copyright 2021 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package flock + +import "errors" + +var ( + // ErrAlreadyLocked is returned when the file is already locked. + ErrAlreadyLocked = errors.New("the file is already locked") +) diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/internal/flock/flock_other.go b/vendor/sigs.k8s.io/controller-runtime/pkg/internal/flock/flock_other.go new file mode 100644 index 000000000..069a5b3a2 --- /dev/null +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/internal/flock/flock_other.go @@ -0,0 +1,24 @@ +// +build !linux,!darwin,!freebsd,!openbsd,!netbsd,!dragonfly + +/* +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package flock + +// Acquire is not implemented on non-unix systems. +func Acquire(path string) error { + return nil +} diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/internal/flock/flock_unix.go b/vendor/sigs.k8s.io/controller-runtime/pkg/internal/flock/flock_unix.go new file mode 100644 index 000000000..3a904f3f5 --- /dev/null +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/internal/flock/flock_unix.go @@ -0,0 +1,47 @@ +// +build linux darwin freebsd openbsd netbsd dragonfly + +/* +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package flock + +import ( + "errors" + "fmt" + "os" + + "golang.org/x/sys/unix" +) + +// Acquire acquires a lock on a file for the duration of the process. This method +// is reentrant. 
+func Acquire(path string) error { + fd, err := unix.Open(path, unix.O_CREAT|unix.O_RDWR|unix.O_CLOEXEC, 0600) + if err != nil { + if errors.Is(err, os.ErrExist) { + return fmt.Errorf("cannot lock file %q: %w", path, ErrAlreadyLocked) + } + return err + } + + // We don't need to close the fd since we should hold + // it until the process exits. + err = unix.Flock(fd, unix.LOCK_NB|unix.LOCK_EX) + if errors.Is(err, unix.EWOULDBLOCK) { // This condition requires LOCK_NB. + return fmt.Errorf("cannot lock file %q: %w", path, ErrAlreadyLocked) + } + return err +} diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/internal/log/log.go b/vendor/sigs.k8s.io/controller-runtime/pkg/internal/log/log.go new file mode 100644 index 000000000..d91a0ca50 --- /dev/null +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/internal/log/log.go @@ -0,0 +1,32 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package log + +import ( + "github.com/go-logr/logr" + + "sigs.k8s.io/controller-runtime/pkg/log" +) + +var ( + // RuntimeLog is a base parent logger for use inside controller-runtime. + RuntimeLog logr.Logger +) + +func init() { + RuntimeLog = log.Log.WithName("controller-runtime") +} diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/internal/objectutil/objectutil.go b/vendor/sigs.k8s.io/controller-runtime/pkg/internal/objectutil/objectutil.go new file mode 100644 index 000000000..7057f3dbe --- /dev/null +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/internal/objectutil/objectutil.go @@ -0,0 +1,78 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package objectutil + +import ( + "errors" + "fmt" + + apimeta "k8s.io/apimachinery/pkg/api/meta" + "k8s.io/apimachinery/pkg/labels" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" + "sigs.k8s.io/controller-runtime/pkg/client/apiutil" +) + +// FilterWithLabels returns a copy of the items in objs matching labelSel. +func FilterWithLabels(objs []runtime.Object, labelSel labels.Selector) ([]runtime.Object, error) { + outItems := make([]runtime.Object, 0, len(objs)) + for _, obj := range objs { + meta, err := apimeta.Accessor(obj) + if err != nil { + return nil, err + } + if labelSel != nil { + lbls := labels.Set(meta.GetLabels()) + if !labelSel.Matches(lbls) { + continue + } + } + outItems = append(outItems, obj.DeepCopyObject()) + } + return outItems, nil +} + +// IsAPINamespaced returns true if the object is namespace scoped. 
+// For unstructured objects the gvk is found from the object itself. +func IsAPINamespaced(obj runtime.Object, scheme *runtime.Scheme, restmapper apimeta.RESTMapper) (bool, error) { + gvk, err := apiutil.GVKForObject(obj, scheme) + if err != nil { + return false, err + } + + return IsAPINamespacedWithGVK(gvk, scheme, restmapper) +} + +// IsAPINamespacedWithGVK returns true if the object having the provided +// GVK is namespace scoped. +func IsAPINamespacedWithGVK(gk schema.GroupVersionKind, scheme *runtime.Scheme, restmapper apimeta.RESTMapper) (bool, error) { + restmapping, err := restmapper.RESTMapping(schema.GroupKind{Group: gk.Group, Kind: gk.Kind}) + if err != nil { + return false, fmt.Errorf("failed to get restmapping: %w", err) + } + + scope := restmapping.Scope.Name() + + if scope == "" { + return false, errors.New("scope cannot be identified, empty scope returned") + } + + if scope != apimeta.RESTScopeNameRoot { + return true, nil + } + return false, nil +} diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/internal/testing/addr/manager.go b/vendor/sigs.k8s.io/controller-runtime/pkg/internal/testing/addr/manager.go new file mode 100644 index 000000000..caaafa262 --- /dev/null +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/internal/testing/addr/manager.go @@ -0,0 +1,126 @@ +/* +Copyright 2021 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package addr + +import ( + "errors" + "fmt" + "io/fs" + "net" + "os" + "path/filepath" + "strings" + "time" + + "sigs.k8s.io/controller-runtime/pkg/internal/flock" +) + +// TODO(directxman12): interface / release functionality for external port managers + +const ( + portReserveTime = 2 * time.Minute + portConflictRetry = 100 + portFilePrefix = "port-" +) + +var ( + cacheDir string +) + +func init() { + baseDir, err := os.UserCacheDir() + if err != nil { + baseDir = os.TempDir() + } + cacheDir = filepath.Join(baseDir, "kubebuilder-envtest") + if err := os.MkdirAll(cacheDir, 0750); err != nil { + panic(err) + } +} + +type portCache struct{} + +func (c *portCache) add(port int) (bool, error) { + // Remove outdated ports. + if err := fs.WalkDir(os.DirFS(cacheDir), ".", func(path string, d fs.DirEntry, err error) error { + if err != nil { + return err + } + if d.IsDir() || !d.Type().IsRegular() || !strings.HasPrefix(path, portFilePrefix) { + return nil + } + info, err := d.Info() + if err != nil { + return err + } + if time.Since(info.ModTime()) > portReserveTime { + if err := os.Remove(filepath.Join(cacheDir, path)); err != nil { + return err + } + } + return nil + }); err != nil { + return false, err + } + // Try allocating new port, by acquiring a file. 
+ path := fmt.Sprintf("%s/%s%d", cacheDir, portFilePrefix, port) + if err := flock.Acquire(path); errors.Is(err, flock.ErrAlreadyLocked) { + return false, nil + } else if err != nil { + return false, err + } + return true, nil +} + +var cache = &portCache{} + +func suggest(listenHost string) (*net.TCPListener, int, string, error) { + if listenHost == "" { + listenHost = "localhost" + } + addr, err := net.ResolveTCPAddr("tcp", net.JoinHostPort(listenHost, "0")) + if err != nil { + return nil, -1, "", err + } + l, err := net.ListenTCP("tcp", addr) + if err != nil { + return nil, -1, "", err + } + return l, l.Addr().(*net.TCPAddr).Port, + addr.IP.String(), + nil +} + +// Suggest suggests an address a process can listen on. It returns +// a tuple consisting of a free port and the hostname resolved to its IP. +// It makes sure that new port allocated does not conflict with old ports +// allocated within 1 minute. +func Suggest(listenHost string) (int, string, error) { + for i := 0; i < portConflictRetry; i++ { + listener, port, resolvedHost, err := suggest(listenHost) + if err != nil { + return -1, "", err + } + defer listener.Close() + if ok, err := cache.add(port); ok { + return port, resolvedHost, nil + } else if err != nil { + return -1, "", err + } + } + return -1, "", fmt.Errorf("no free ports found after %d retries", portConflictRetry) +} diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/internal/testing/certs/tinyca.go b/vendor/sigs.k8s.io/controller-runtime/pkg/internal/testing/certs/tinyca.go new file mode 100644 index 000000000..55b044c5b --- /dev/null +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/internal/testing/certs/tinyca.go @@ -0,0 +1,224 @@ +/* +Copyright 2021 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package certs + +// NB(directxman12): nothing has verified that this has good settings. In fact, +// the setting generated here are probably terrible, but they're fine for integration +// tests. These ABSOLUTELY SHOULD NOT ever be exposed in the public API. They're +// ONLY for use with envtest's ability to configure webhook testing. +// If I didn't otherwise not want to add a dependency on cfssl, I'd just use that. + +import ( + "crypto" + "crypto/ecdsa" + "crypto/elliptic" + crand "crypto/rand" + "crypto/x509" + "crypto/x509/pkix" + "encoding/pem" + "fmt" + "math/big" + "net" + "time" + + certutil "k8s.io/client-go/util/cert" +) + +var ( + ellipticCurve = elliptic.P256() + bigOne = big.NewInt(1) +) + +// CertPair is a private key and certificate for use for client auth, as a CA, or serving. +type CertPair struct { + Key crypto.Signer + Cert *x509.Certificate +} + +// CertBytes returns the PEM-encoded version of the certificate for this pair. +func (k CertPair) CertBytes() []byte { + return pem.EncodeToMemory(&pem.Block{ + Type: "CERTIFICATE", + Bytes: k.Cert.Raw, + }) +} + +// AsBytes encodes keypair in the appropriate formats for on-disk storage (PEM and +// PKCS8, respectively). 
+func (k CertPair) AsBytes() (cert []byte, key []byte, err error) { + cert = k.CertBytes() + + rawKeyData, err := x509.MarshalPKCS8PrivateKey(k.Key) + if err != nil { + return nil, nil, fmt.Errorf("unable to encode private key: %v", err) + } + + key = pem.EncodeToMemory(&pem.Block{ + Type: "PRIVATE KEY", + Bytes: rawKeyData, + }) + + return cert, key, nil +} + +// TinyCA supports signing serving certs and client-certs, +// and can be used as an auth mechanism with envtest. +type TinyCA struct { + CA CertPair + orgName string + + nextSerial *big.Int +} + +// newPrivateKey generates a new private key of a relatively sane size (see +// rsaKeySize). +func newPrivateKey() (crypto.Signer, error) { + return ecdsa.GenerateKey(ellipticCurve, crand.Reader) +} + +// NewTinyCA creates a new a tiny CA utility for provisioning serving certs and client certs FOR TESTING ONLY. +// Don't use this for anything else! +func NewTinyCA() (*TinyCA, error) { + caPrivateKey, err := newPrivateKey() + if err != nil { + return nil, fmt.Errorf("unable to generate private key for CA: %v", err) + } + caCfg := certutil.Config{CommonName: "envtest-environment", Organization: []string{"envtest"}} + caCert, err := certutil.NewSelfSignedCACert(caCfg, caPrivateKey) + if err != nil { + return nil, fmt.Errorf("unable to generate certificate for CA: %v", err) + } + + return &TinyCA{ + CA: CertPair{Key: caPrivateKey, Cert: caCert}, + orgName: "envtest", + nextSerial: big.NewInt(1), + }, nil +} + +func (c *TinyCA) makeCert(cfg certutil.Config) (CertPair, error) { + now := time.Now() + + key, err := newPrivateKey() + if err != nil { + return CertPair{}, fmt.Errorf("unable to create private key: %v", err) + } + + serial := new(big.Int).Set(c.nextSerial) + c.nextSerial.Add(c.nextSerial, bigOne) + + template := x509.Certificate{ + Subject: pkix.Name{CommonName: cfg.CommonName, Organization: cfg.Organization}, + DNSNames: cfg.AltNames.DNSNames, + IPAddresses: cfg.AltNames.IPs, + SerialNumber: serial, + + KeyUsage: x509.KeyUsageKeyEncipherment | x509.KeyUsageDigitalSignature, + ExtKeyUsage: cfg.Usages, + + // technically not necessary for testing, but let's set anyway just in case. + NotBefore: now.UTC(), + // 1 week -- the default for cfssl, and just long enough for a + // long-term test, but not too long that anyone would try to use this + // seriously. + NotAfter: now.Add(168 * time.Hour).UTC(), + } + + certRaw, err := x509.CreateCertificate(crand.Reader, &template, c.CA.Cert, key.Public(), c.CA.Key) + if err != nil { + return CertPair{}, fmt.Errorf("unable to create certificate: %v", err) + } + + cert, err := x509.ParseCertificate(certRaw) + if err != nil { + return CertPair{}, fmt.Errorf("generated invalid certificate, could not parse: %v", err) + } + + return CertPair{ + Key: key, + Cert: cert, + }, nil +} + +// NewServingCert returns a new CertPair for a serving HTTPS on localhost (or other specified names). +func (c *TinyCA) NewServingCert(names ...string) (CertPair, error) { + if len(names) == 0 { + names = []string{"localhost"} + } + dnsNames, ips, err := resolveNames(names) + if err != nil { + return CertPair{}, err + } + + return c.makeCert(certutil.Config{ + CommonName: "localhost", + Organization: []string{c.orgName}, + AltNames: certutil.AltNames{ + DNSNames: dnsNames, + IPs: ips, + }, + Usages: []x509.ExtKeyUsage{x509.ExtKeyUsageServerAuth}, + }) +} + +// ClientInfo describes some Kubernetes user for the purposes of creating +// client certificates. 
+type ClientInfo struct { + // Name is the user name (embedded as the cert's CommonName) + Name string + // Groups are the groups to which this user belongs (embedded as the cert's + // Organization) + Groups []string +} + +// NewClientCert produces a new CertPair suitable for use with Kubernetes +// client cert auth with an API server validating based on this CA. +func (c *TinyCA) NewClientCert(user ClientInfo) (CertPair, error) { + return c.makeCert(certutil.Config{ + CommonName: user.Name, + Organization: user.Groups, + Usages: []x509.ExtKeyUsage{x509.ExtKeyUsageClientAuth}, + }) +} + +func resolveNames(names []string) ([]string, []net.IP, error) { + dnsNames := []string{} + ips := []net.IP{} + for _, name := range names { + if name == "" { + continue + } + ip := net.ParseIP(name) + if ip == nil { + dnsNames = append(dnsNames, name) + // Also resolve to IPs. + nameIPs, err := net.LookupHost(name) + if err != nil { + return nil, nil, err + } + for _, nameIP := range nameIPs { + ip = net.ParseIP(nameIP) + if ip != nil { + ips = append(ips, ip) + } + } + } else { + ips = append(ips, ip) + } + } + return dnsNames, ips, nil +} diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/internal/testing/controlplane/apiserver.go b/vendor/sigs.k8s.io/controller-runtime/pkg/internal/testing/controlplane/apiserver.go new file mode 100644 index 000000000..d6a71dc95 --- /dev/null +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/internal/testing/controlplane/apiserver.go @@ -0,0 +1,469 @@ +/* +Copyright 2021 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package controlplane + +import ( + "fmt" + "io" + "io/ioutil" + "net/url" + "os" + "path/filepath" + "strconv" + "time" + + "sigs.k8s.io/controller-runtime/pkg/internal/testing/addr" + "sigs.k8s.io/controller-runtime/pkg/internal/testing/certs" + "sigs.k8s.io/controller-runtime/pkg/internal/testing/process" +) + +const ( + // saKeyFile is the name of the service account signing private key file. + saKeyFile = "sa-signer.key" + // saKeyFile is the name of the service account signing public key (cert) file. + saCertFile = "sa-signer.crt" +) + +// SecureServing provides/configures how the API server serves on the secure port. +type SecureServing struct { + // ListenAddr contains the host & port to serve on. + // + // Configurable. If unset, it will be defaulted. + process.ListenAddr + // CA contains the CA that signed the API server's serving certificates. + // + // Read-only. + CA []byte + // Authn can be used to provision users, and override what type of + // authentication is used to provision users. + // + // Configurable. If unset, it will be defaulted. + Authn +} + +// APIServer knows how to run a kubernetes apiserver. +type APIServer struct { + // URL is the address the ApiServer should listen on for client + // connections. + // + // If set, this will configure the *insecure* serving details. + // If unset, it will contain the insecure port if insecure serving is enabled, + // and otherwise will contain the secure port. 
+ // + // If this is not specified, we default to a random free port on localhost. + // + // Deprecated: use InsecureServing (for the insecure URL) or SecureServing, ideally. + URL *url.URL + + // SecurePort is the additional secure port that the APIServer should listen on. + // + // If set, this will override SecureServing.Port. + // + // Deprecated: use SecureServing. + SecurePort int + + // SecureServing indicates how the API server will serve on the secure port. + // + // Some parts are configurable. Will be defaulted if unset. + SecureServing + + // InsecureServing indicates how the API server will serve on the insecure port. + // + // If unset, the insecure port will be disabled. Set to an empty struct to get + // default values. + // + // Deprecated: does not work with Kubernetes versions 1.20 and above. Use secure + // serving instead. + InsecureServing *process.ListenAddr + + // Path is the path to the apiserver binary. + // + // If this is left as the empty string, we will attempt to locate a binary, + // by checking for the TEST_ASSET_KUBE_APISERVER environment variable, and + // the default test assets directory. See the "Binaries" section above (in + // doc.go) for details. + Path string + + // Args is a list of arguments which will passed to the APIServer binary. + // Before they are passed on, they will be evaluated as go-template strings. + // This means you can use fields which are defined and exported on this + // APIServer struct (e.g. "--cert-dir={{ .Dir }}"). + // Those templates will be evaluated after the defaulting of the APIServer's + // fields has already happened and just before the binary actually gets + // started. Thus you have access to calculated fields like `URL` and others. + // + // If not specified, the minimal set of arguments to run the APIServer will + // be used. + // + // They will be loaded into the same argument set as Configure. Each flag + // will be Append-ed to the configured arguments just before launch. + // + // Deprecated: use Configure instead. + Args []string + + // CertDir is a path to a directory containing whatever certificates the + // APIServer will need. + // + // If left unspecified, then the Start() method will create a fresh temporary + // directory, and the Stop() method will clean it up. + CertDir string + + // EtcdURL is the URL of the Etcd the APIServer should use. + // + // If this is not specified, the Start() method will return an error. + EtcdURL *url.URL + + // StartTimeout, StopTimeout specify the time the APIServer is allowed to + // take when starting and stoppping before an error is emitted. + // + // If not specified, these default to 20 seconds. + StartTimeout time.Duration + StopTimeout time.Duration + + // Out, Err specify where APIServer should write its StdOut, StdErr to. + // + // If not specified, the output will be discarded. + Out io.Writer + Err io.Writer + + processState *process.State + + // args contains the structured arguments to use for running the API server + // Lazily initialized by .Configure(), Defaulted eventually with .defaultArgs() + args *process.Arguments +} + +// Configure returns Arguments that may be used to customize the +// flags used to launch the API server. A set of defaults will +// be applied underneath. +func (s *APIServer) Configure() *process.Arguments { + if s.args == nil { + s.args = process.EmptyArguments() + } + return s.args +} + +// Start starts the apiserver, waits for it to come up, and returns an error, +// if occurred. 
+func (s *APIServer) Start() error { + if err := s.prepare(); err != nil { + return err + } + return s.processState.Start(s.Out, s.Err) +} + +func (s *APIServer) prepare() error { + if err := s.setProcessState(); err != nil { + return err + } + return s.Authn.Start() +} + +// configurePorts configures the serving ports for this API server. +// +// Most of this method currently deals with making the deprecated fields +// take precedence over the new fields. +func (s *APIServer) configurePorts() error { + // prefer the old fields to the new fields if a user set one, + // otherwise, default the new fields and populate the old ones. + + // Insecure: URL, InsecureServing + if s.URL != nil { + s.InsecureServing = &process.ListenAddr{ + Address: s.URL.Hostname(), + Port: s.URL.Port(), + } + } else if insec := s.InsecureServing; insec != nil { + if insec.Port == "" || insec.Address == "" { + port, host, err := addr.Suggest("") + if err != nil { + return fmt.Errorf("unable to provision unused insecure port: %w", err) + } + s.InsecureServing.Port = strconv.Itoa(port) + s.InsecureServing.Address = host + } + s.URL = s.InsecureServing.URL("http", "") + } + + // Secure: SecurePort, SecureServing + if s.SecurePort != 0 { + s.SecureServing.Port = strconv.Itoa(s.SecurePort) + // if we don't have an address, try the insecure address, and otherwise + // default to loopback. + if s.SecureServing.Address == "" { + if s.InsecureServing != nil { + s.SecureServing.Address = s.InsecureServing.Address + } else { + s.SecureServing.Address = "127.0.0.1" + } + } + } else if s.SecureServing.Port == "" || s.SecureServing.Address == "" { + port, host, err := addr.Suggest("") + if err != nil { + return fmt.Errorf("unable to provision unused secure port: %w", err) + } + s.SecureServing.Port = strconv.Itoa(port) + s.SecureServing.Address = host + s.SecurePort = port + } + + return nil +} + +func (s *APIServer) setProcessState() error { + if s.EtcdURL == nil { + return fmt.Errorf("expected EtcdURL to be configured") + } + + var err error + + // unconditionally re-set this so we can successfully restart + // TODO(directxman12): we supported this in the past, but do we actually + // want to support re-using an API server object to restart? The loss + // of provisioned users is surprising to say the least. + s.processState = &process.State{ + Dir: s.CertDir, + Path: s.Path, + StartTimeout: s.StartTimeout, + StopTimeout: s.StopTimeout, + } + if err := s.processState.Init("kube-apiserver"); err != nil { + return err + } + + if err := s.configurePorts(); err != nil { + return err + } + + // the secure port will always be on, so use that + s.processState.HealthCheck.URL = *s.SecureServing.URL("https", "/healthz") + + s.CertDir = s.processState.Dir + s.Path = s.processState.Path + s.StartTimeout = s.processState.StartTimeout + s.StopTimeout = s.processState.StopTimeout + + if err := s.populateAPIServerCerts(); err != nil { + return err + } + + if s.SecureServing.Authn == nil { + authn, err := NewCertAuthn() + if err != nil { + return err + } + s.SecureServing.Authn = authn + } + + if err := s.Authn.Configure(s.CertDir, s.Configure()); err != nil { + return err + } + + // NB(directxman12): insecure port is a mess: + // - 1.19 and below have the `--insecure-port` flag, and require it to be set to zero to + // disable it, otherwise the default will be used and we'll conflict. + // - 1.20 requires the flag to be unset or set to zero, and yells at you if you configure it + // - 1.24 won't have the flag at all... 
+ // + // In an effort to automatically do the right thing during this mess, we do feature discovery + // on the flags, and hope that we've "parsed" them properly. + // + // TODO(directxman12): once we support 1.20 as the min version (might be when 1.24 comes out, + // might be around 1.25 or 1.26), remove this logic and the corresponding line in API server's + // default args. + if err := s.discoverFlags(); err != nil { + return err + } + + s.processState.Args, s.Args, err = process.TemplateAndArguments(s.Args, s.Configure(), process.TemplateDefaults{ //nolint:staticcheck + Data: s, + Defaults: s.defaultArgs(), + MinimalDefaults: map[string][]string{ + // as per kubernetes-sigs/controller-runtime#641, we need this (we + // probably need other stuff too, but this is the only thing that was + // previously considered a "minimal default") + "service-cluster-ip-range": {"10.0.0.0/24"}, + + // we need *some* authorization mode for health checks on the secure port, + // so default to RBAC unless the user set something else (in which case + // this'll be ignored due to SliceToArguments using AppendNoDefaults). + "authorization-mode": {"RBAC"}, + }, + }) + if err != nil { + return err + } + + return nil +} + +// discoverFlags checks for certain flags that *must* be set in certain +// versions, and *must not* be set in others. +func (s *APIServer) discoverFlags() error { + // Present: <1.24, Absent: >= 1.24 + present, err := s.processState.CheckFlag("insecure-port") + if err != nil { + return err + } + + if !present { + s.Configure().Disable("insecure-port") + } + + return nil +} + +func (s *APIServer) defaultArgs() map[string][]string { + args := map[string][]string{ + "service-cluster-ip-range": {"10.0.0.0/24"}, + "allow-privileged": {"true"}, + // we're keeping this disabled because if enabled, default SA is + // missing which would force all tests to create one in normal + // apiserver operation this SA is created by controller, but that is + // not run in integration environment + "disable-admission-plugins": {"ServiceAccount"}, + "cert-dir": {s.CertDir}, + "authorization-mode": {"RBAC"}, + "secure-port": {s.SecureServing.Port}, + // NB(directxman12): previously we didn't set the bind address for the secure + // port. 
It *shouldn't* make a difference unless people are doing something really + // funky, but if you start to get bug reports look here ;-) + "bind-address": {s.SecureServing.Address}, + + // required on 1.20+, fine to leave on for <1.20 + "service-account-issuer": {s.SecureServing.URL("https", "/").String()}, + "service-account-key-file": {filepath.Join(s.CertDir, saCertFile)}, + "service-account-signing-key-file": {filepath.Join(s.CertDir, saKeyFile)}, + } + if s.EtcdURL != nil { + args["etcd-servers"] = []string{s.EtcdURL.String()} + } + if s.URL != nil { + args["insecure-port"] = []string{s.URL.Port()} + args["insecure-bind-address"] = []string{s.URL.Hostname()} + } else { + // TODO(directxman12): remove this once 1.21 is the lowest version we support + // (this might be a while, but this line'll break as of 1.24, so see the comment + // in Start + args["insecure-port"] = []string{"0"} + } + return args +} + +func (s *APIServer) populateAPIServerCerts() error { + _, statErr := os.Stat(filepath.Join(s.CertDir, "apiserver.crt")) + if !os.IsNotExist(statErr) { + return statErr + } + + ca, err := certs.NewTinyCA() + if err != nil { + return err + } + + servingCerts, err := ca.NewServingCert() + if err != nil { + return err + } + + certData, keyData, err := servingCerts.AsBytes() + if err != nil { + return err + } + + if err := ioutil.WriteFile(filepath.Join(s.CertDir, "apiserver.crt"), certData, 0640); err != nil { //nolint:gosec + return err + } + if err := ioutil.WriteFile(filepath.Join(s.CertDir, "apiserver.key"), keyData, 0640); err != nil { //nolint:gosec + return err + } + + s.SecureServing.CA = ca.CA.CertBytes() + + // service account signing files too + saCA, err := certs.NewTinyCA() + if err != nil { + return err + } + + saCert, saKey, err := saCA.CA.AsBytes() + if err != nil { + return err + } + + if err := ioutil.WriteFile(filepath.Join(s.CertDir, saCertFile), saCert, 0640); err != nil { //nolint:gosec + return err + } + return ioutil.WriteFile(filepath.Join(s.CertDir, saKeyFile), saKey, 0640) //nolint:gosec +} + +// Stop stops this process gracefully, waits for its termination, and cleans up +// the CertDir if necessary. +func (s *APIServer) Stop() error { + if s.processState != nil { + if s.processState.DirNeedsCleaning { + s.CertDir = "" // reset the directory if it was randomly allocated, so that we can safely restart + } + if err := s.processState.Stop(); err != nil { + return err + } + } + return s.Authn.Stop() +} + +// APIServerDefaultArgs exposes the default args for the APIServer so that you +// can use those to append your own additional arguments. +// +// Note that these arguments don't handle newer API servers well to due the more +// complex feature detection neeeded. It's recommended that you switch to .Configure +// as you upgrade API server versions. +// +// Deprecated: use APIServer.Configure(). 
+var APIServerDefaultArgs = []string{ + "--advertise-address=127.0.0.1", + "--etcd-servers={{ if .EtcdURL }}{{ .EtcdURL.String }}{{ end }}", + "--cert-dir={{ .CertDir }}", + "--insecure-port={{ if .URL }}{{ .URL.Port }}{{else}}0{{ end }}", + "{{ if .URL }}--insecure-bind-address={{ .URL.Hostname }}{{ end }}", + "--secure-port={{ if .SecurePort }}{{ .SecurePort }}{{ end }}", + // we're keeping this disabled because if enabled, default SA is missing which would force all tests to create one + // in normal apiserver operation this SA is created by controller, but that is not run in integration environment + "--disable-admission-plugins=ServiceAccount", + "--service-cluster-ip-range=10.0.0.0/24", + "--allow-privileged=true", + // NB(directxman12): we also enable RBAC if nothing else was enabled +} + +// PrepareAPIServer is an internal-only (NEVER SHOULD BE EXPOSED) +// function that sets up the API server just before starting it, +// without actually starting it. This saves time on tests. +// +// NB(directxman12): do not expose this outside of internal -- it's unsafe to +// use, because things like port allocation could race even more than they +// currently do if you later call start! +func PrepareAPIServer(s *APIServer) error { + return s.prepare() +} + +// APIServerArguments is an internal-only (NEVER SHOULD BE EXPOSED) +// function that sets up the API server just before starting it, +// without actually starting it. It's public to make testing easier. +// +// NB(directxman12): do not expose this outside of internal. +func APIServerArguments(s *APIServer) []string { + return s.processState.Args +} diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/internal/testing/controlplane/auth.go b/vendor/sigs.k8s.io/controller-runtime/pkg/internal/testing/controlplane/auth.go new file mode 100644 index 000000000..b2cd4e5e0 --- /dev/null +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/internal/testing/controlplane/auth.go @@ -0,0 +1,142 @@ +/* +Copyright 2021 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package controlplane + +import ( + "fmt" + "io/ioutil" + "path/filepath" + + "k8s.io/client-go/rest" + "sigs.k8s.io/controller-runtime/pkg/internal/testing/certs" + "sigs.k8s.io/controller-runtime/pkg/internal/testing/process" +) + +// User represents a Kubernetes user. +type User struct { + // Name is the user's Name. + Name string + // Groups are the groups to which the user belongs. + Groups []string +} + +// Authn knows how to configure an API server for a particular type of authentication, +// and provision users under that authentication scheme. +// +// The methods must be called in the following order (as presented below in the interface +// for a mnemonic): +// +// 1. Configure +// 2. Start +// 3. AddUsers (0+ calls) +// 4. Stop. +type Authn interface { + // Configure provides the working directory to this authenticator, + // and configures the given API server arguments to make use of this authenticator. + // + // Should be called first. 
+ Configure(workDir string, args *process.Arguments) error + // Start runs this authenticator. Will be called just before API server start. + // + // Must be called after Configure. + Start() error + // AddUser provisions a user, returning a copy of the given base rest.Config + // configured to authenticate as that users. + // + // May only be called while the authenticator is "running". + AddUser(user User, baseCfg *rest.Config) (*rest.Config, error) + // Stop shuts down this authenticator. + Stop() error +} + +// CertAuthn is an authenticator (Authn) that makes use of client certificate authn. +type CertAuthn struct { + // ca is the CA used to sign the client certs + ca *certs.TinyCA + // certDir is the directory used to write the CA crt file + // so that the API server can read it. + certDir string +} + +// NewCertAuthn creates a new client-cert-based Authn with a new CA. +func NewCertAuthn() (*CertAuthn, error) { + ca, err := certs.NewTinyCA() + if err != nil { + return nil, fmt.Errorf("unable to provision client certificate auth CA: %w", err) + } + return &CertAuthn{ + ca: ca, + }, nil +} + +// AddUser provisions a new user that's authenticated via certificates, with +// the given uesrname and groups embedded in the certificate as expected by the +// API server. +func (c *CertAuthn) AddUser(user User, baseCfg *rest.Config) (*rest.Config, error) { + certs, err := c.ca.NewClientCert(certs.ClientInfo{ + Name: user.Name, + Groups: user.Groups, + }) + if err != nil { + return nil, fmt.Errorf("unable to create client certificates for %s: %w", user.Name, err) + } + + crt, key, err := certs.AsBytes() + if err != nil { + return nil, fmt.Errorf("unable to serialize client certificates for %s: %w", user.Name, err) + } + + cfg := rest.CopyConfig(baseCfg) + cfg.CertData = crt + cfg.KeyData = key + + return cfg, nil +} + +// caCrtPath returns the path to the on-disk client-cert CA crt file. +func (c *CertAuthn) caCrtPath() string { + return filepath.Join(c.certDir, "client-cert-auth-ca.crt") +} + +// Configure provides the working directory to this authenticator, +// and configures the given API server arguments to make use of this authenticator. +func (c *CertAuthn) Configure(workDir string, args *process.Arguments) error { + c.certDir = workDir + args.Set("client-ca-file", c.caCrtPath()) + return nil +} + +// Start runs this authenticator. Will be called just before API server start. +// +// Must be called after Configure. +func (c *CertAuthn) Start() error { + if len(c.certDir) == 0 { + return fmt.Errorf("start called before configure") + } + caCrt := c.ca.CA.CertBytes() + if err := ioutil.WriteFile(c.caCrtPath(), caCrt, 0640); err != nil { //nolint:gosec + return fmt.Errorf("unable to save the client certificate CA to %s: %w", c.caCrtPath(), err) + } + + return nil +} + +// Stop shuts down this authenticator. +func (c *CertAuthn) Stop() error { + // no-op -- our workdir is cleaned up for us automatically + return nil +} diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/internal/testing/controlplane/etcd.go b/vendor/sigs.k8s.io/controller-runtime/pkg/internal/testing/controlplane/etcd.go new file mode 100644 index 000000000..c6d625173 --- /dev/null +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/internal/testing/controlplane/etcd.go @@ -0,0 +1,202 @@ +/* +Copyright 2021 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package controlplane
+
+import (
+ "io"
+ "net"
+ "net/url"
+ "strconv"
+ "time"
+
+ "sigs.k8s.io/controller-runtime/pkg/internal/testing/addr"
+ "sigs.k8s.io/controller-runtime/pkg/internal/testing/process"
+)
+
+// Etcd knows how to run an etcd server.
+type Etcd struct {
+ // URL is the address the Etcd should listen on for client connections.
+ //
+ // If this is not specified, we default to a random free port on localhost.
+ URL *url.URL
+
+ // Path is the path to the etcd binary.
+ //
+ // If this is left as the empty string, we will attempt to locate a binary,
+ // by checking for the TEST_ASSET_ETCD environment variable, and the default
+ // test assets directory. See the "Binaries" section above (in doc.go) for
+ // details.
+ Path string
+
+ // Args is a list of arguments which will be passed to the Etcd binary. Before
+ // they are passed on, they will be evaluated as go-template strings. This
+ // means you can use fields which are defined and exported on this Etcd
+ // struct (e.g. "--data-dir={{ .Dir }}").
+ // Those templates will be evaluated after the defaulting of the Etcd's
+ // fields has already happened and just before the binary actually gets
+ // started. Thus you have access to calculated fields like `URL` and others.
+ //
+ // If not specified, the minimal set of arguments to run the Etcd will be
+ // used.
+ //
+ // They will be loaded into the same argument set as Configure. Each flag
+ // will be Append-ed to the configured arguments just before launch.
+ //
+ // Deprecated: use Configure instead.
+ Args []string
+
+ // DataDir is a path to a directory in which etcd can store its state.
+ //
+ // If left unspecified, then the Start() method will create a fresh temporary
+ // directory, and the Stop() method will clean it up.
+ DataDir string
+
+ // StartTimeout, StopTimeout specify the time the Etcd is allowed to
+ // take when starting and stopping before an error is emitted.
+ //
+ // If not specified, these default to 20 seconds.
+ StartTimeout time.Duration
+ StopTimeout time.Duration
+
+ // Out, Err specify where Etcd should write its StdOut, StdErr to.
+ //
+ // If not specified, the output will be discarded.
+ Out io.Writer
+ Err io.Writer
+
+ // processState contains the actual details about this running process
+ processState *process.State
+
+ // args contains the structured arguments to use for running etcd.
+ // Lazily initialized by .Configure(), defaulted eventually with .defaultArgs()
+ args *process.Arguments
+
+ // listenPeerURL is the address the Etcd should listen on for peer connections.
+ // It's automatically generated and a random port is picked during execution.
+ listenPeerURL *url.URL
+}
+
+// Start starts the etcd, waits for it to come up, and returns an error, if one
+// occurred.
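+//
+// A minimal usage sketch (assumes the etcd binary can be located via
+// TEST_ASSET_ETCD, KUBEBUILDER_ASSETS, or the default assets directory):
+//
+//	etcd := &Etcd{}
+//	if err := etcd.Start(); err != nil {
+//		// handle startup failure
+//	}
+//	defer etcd.Stop()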
+func (e *Etcd) Start() error { + if err := e.setProcessState(); err != nil { + return err + } + return e.processState.Start(e.Out, e.Err) +} + +func (e *Etcd) setProcessState() error { + e.processState = &process.State{ + Dir: e.DataDir, + Path: e.Path, + StartTimeout: e.StartTimeout, + StopTimeout: e.StopTimeout, + } + + // unconditionally re-set this so we can successfully restart + // TODO(directxman12): we supported this in the past, but do we actually + // want to support re-using an API server object to restart? The loss + // of provisioned users is surprising to say the least. + if err := e.processState.Init("etcd"); err != nil { + return err + } + + // Set the listen url. + if e.URL == nil { + port, host, err := addr.Suggest("") + if err != nil { + return err + } + e.URL = &url.URL{ + Scheme: "http", + Host: net.JoinHostPort(host, strconv.Itoa(port)), + } + } + + // Set the listen peer URL. + { + port, host, err := addr.Suggest("") + if err != nil { + return err + } + e.listenPeerURL = &url.URL{ + Scheme: "http", + Host: net.JoinHostPort(host, strconv.Itoa(port)), + } + } + + // can use /health as of etcd 3.3.0 + e.processState.HealthCheck.URL = *e.URL + e.processState.HealthCheck.Path = "/health" + + e.DataDir = e.processState.Dir + e.Path = e.processState.Path + e.StartTimeout = e.processState.StartTimeout + e.StopTimeout = e.processState.StopTimeout + + var err error + e.processState.Args, e.Args, err = process.TemplateAndArguments(e.Args, e.Configure(), process.TemplateDefaults{ //nolint:staticcheck + Data: e, + Defaults: e.defaultArgs(), + }) + return err +} + +// Stop stops this process gracefully, waits for its termination, and cleans up +// the DataDir if necessary. +func (e *Etcd) Stop() error { + if e.processState.DirNeedsCleaning { + e.DataDir = "" // reset the directory if it was randomly allocated, so that we can safely restart + } + return e.processState.Stop() +} + +func (e *Etcd) defaultArgs() map[string][]string { + args := map[string][]string{ + "listen-peer-urls": {e.listenPeerURL.String()}, + "data-dir": {e.DataDir}, + } + if e.URL != nil { + args["advertise-client-urls"] = []string{e.URL.String()} + args["listen-client-urls"] = []string{e.URL.String()} + } + + // Add unsafe no fsync, available from etcd 3.5 + if ok, _ := e.processState.CheckFlag("unsafe-no-fsync"); ok { + args["unsafe-no-fsync"] = []string{"true"} + } + return args +} + +// Configure returns Arguments that may be used to customize the +// flags used to launch etcd. A set of defaults will +// be applied underneath. +func (e *Etcd) Configure() *process.Arguments { + if e.args == nil { + e.args = process.EmptyArguments() + } + return e.args +} + +// EtcdDefaultArgs exposes the default args for Etcd so that you +// can use those to append your own additional arguments. +var EtcdDefaultArgs = []string{ + "--listen-peer-urls=http://localhost:0", + "--advertise-client-urls={{ if .URL }}{{ .URL.String }}{{ end }}", + "--listen-client-urls={{ if .URL }}{{ .URL.String }}{{ end }}", + "--data-dir={{ .DataDir }}", +} diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/internal/testing/controlplane/kubectl.go b/vendor/sigs.k8s.io/controller-runtime/pkg/internal/testing/controlplane/kubectl.go new file mode 100644 index 000000000..a27b7a0ff --- /dev/null +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/internal/testing/controlplane/kubectl.go @@ -0,0 +1,119 @@ +/* +Copyright 2021 The Kubernetes Authors. 
+ +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package controlplane + +import ( + "bytes" + "fmt" + "io" + "net/url" + "os/exec" + + "k8s.io/client-go/rest" + "k8s.io/client-go/tools/clientcmd" + kcapi "k8s.io/client-go/tools/clientcmd/api" + + "sigs.k8s.io/controller-runtime/pkg/internal/testing/process" +) + +const ( + envtestName = "envtest" +) + +// KubeConfigFromREST reverse-engineers a kubeconfig file from a rest.Config. +// The options are tailored towards the rest.Configs we generate, so they're +// not broadly applicable. +// +// This is not intended to be exposed beyond internal for the above reasons. +func KubeConfigFromREST(cfg *rest.Config) ([]byte, error) { + kubeConfig := kcapi.NewConfig() + protocol := "https" + if !rest.IsConfigTransportTLS(*cfg) { + protocol = "http" + } + + // cfg.Host is a URL, so we need to parse it so we can properly append the API path + baseURL, err := url.Parse(cfg.Host) + if err != nil { + return nil, fmt.Errorf("unable to interpret config's host value as a URL: %w", err) + } + + kubeConfig.Clusters[envtestName] = &kcapi.Cluster{ + // TODO(directxman12): if client-go ever decides to expose defaultServerUrlFor(config), + // we can just use that. Note that this is not the same as the public DefaultServerURL, + // which requires us to pass a bunch of stuff in manually. + Server: (&url.URL{Scheme: protocol, Host: baseURL.Host, Path: cfg.APIPath}).String(), + CertificateAuthorityData: cfg.CAData, + } + kubeConfig.AuthInfos[envtestName] = &kcapi.AuthInfo{ + // try to cover all auth strategies that aren't plugins + ClientCertificateData: cfg.CertData, + ClientKeyData: cfg.KeyData, + Token: cfg.BearerToken, + Username: cfg.Username, + Password: cfg.Password, + } + kcCtx := kcapi.NewContext() + kcCtx.Cluster = envtestName + kcCtx.AuthInfo = envtestName + kubeConfig.Contexts[envtestName] = kcCtx + kubeConfig.CurrentContext = envtestName + + contents, err := clientcmd.Write(*kubeConfig) + if err != nil { + return nil, fmt.Errorf("unable to serialize kubeconfig file: %w", err) + } + return contents, nil +} + +// KubeCtl is a wrapper around the kubectl binary. +type KubeCtl struct { + // Path where the kubectl binary can be found. + // + // If this is left empty, we will attempt to locate a binary, by checking for + // the TEST_ASSET_KUBECTL environment variable, and the default test assets + // directory. See the "Binaries" section above (in doc.go) for details. + Path string + + // Opts can be used to configure additional flags which will be used each + // time the wrapped binary is called. + // + // For example, you might want to use this to set the URL of the APIServer to + // connect to. + Opts []string +} + +// Run executes the wrapped binary with some preconfigured options and the +// arguments given to this method. It returns Readers for the stdout and +// stderr. 
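+//
+// A usage sketch (the kubeconfig path here is hypothetical):
+//
+//	k := &KubeCtl{Opts: []string{"--kubeconfig=/tmp/test.kubecfg"}}
+//	stdout, stderr, err := k.Run("get", "pods", "-A")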
+func (k *KubeCtl) Run(args ...string) (stdout, stderr io.Reader, err error) { + if k.Path == "" { + k.Path = process.BinPathFinder("kubectl", "") + } + + stdoutBuffer := &bytes.Buffer{} + stderrBuffer := &bytes.Buffer{} + allArgs := append(k.Opts, args...) + + cmd := exec.Command(k.Path, allArgs...) + cmd.Stdout = stdoutBuffer + cmd.Stderr = stderrBuffer + + err = cmd.Run() + + return stdoutBuffer, stderrBuffer, err +} diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/internal/testing/controlplane/plane.go b/vendor/sigs.k8s.io/controller-runtime/pkg/internal/testing/controlplane/plane.go new file mode 100644 index 000000000..36fd3c630 --- /dev/null +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/internal/testing/controlplane/plane.go @@ -0,0 +1,248 @@ +/* +Copyright 2021 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package controlplane + +import ( + "fmt" + "net/url" + "os" + + kerrors "k8s.io/apimachinery/pkg/util/errors" + "k8s.io/client-go/rest" + "sigs.k8s.io/controller-runtime/pkg/internal/testing/certs" +) + +// NewTinyCA creates a new a tiny CA utility for provisioning serving certs and client certs FOR TESTING ONLY. +// Don't use this for anything else! +var NewTinyCA = certs.NewTinyCA + +// ControlPlane is a struct that knows how to start your test control plane. +// +// Right now, that means Etcd and your APIServer. This is likely to increase in +// future. +type ControlPlane struct { + APIServer *APIServer + Etcd *Etcd + + // Kubectl will override the default asset search path for kubectl + KubectlPath string + + // for the deprecated methods (Kubectl, etc) + defaultUserCfg *rest.Config + defaultUserKubectl *KubeCtl +} + +// Start will start your control plane processes. To stop them, call Stop(). +func (f *ControlPlane) Start() error { + if f.Etcd == nil { + f.Etcd = &Etcd{} + } + if err := f.Etcd.Start(); err != nil { + return err + } + + if f.APIServer == nil { + f.APIServer = &APIServer{} + } + f.APIServer.EtcdURL = f.Etcd.URL + if err := f.APIServer.Start(); err != nil { + return err + } + + // provision the default user -- can be removed when the related + // methods are removed. The default user has admin permissions to + // mimic legacy no-authz setups. + user, err := f.AddUser(User{Name: "default", Groups: []string{"system:masters"}}, &rest.Config{}) + if err != nil { + return fmt.Errorf("unable to provision the default (legacy) user: %w", err) + } + kubectl, err := user.Kubectl() + if err != nil { + return fmt.Errorf("unable to provision the default (legacy) kubeconfig: %w", err) + } + f.defaultUserCfg = user.Config() + f.defaultUserKubectl = kubectl + return nil +} + +// Stop will stop your control plane processes, and clean up their data. 
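+//
+// A typical Start/Stop lifecycle looks roughly like this (a sketch; error
+// handling elided, and the user name is arbitrary):
+//
+//	plane := &ControlPlane{}
+//	_ = plane.Start()
+//	defer plane.Stop()
+//	user, _ := plane.AddUser(User{Name: "admin", Groups: []string{"system:masters"}}, nil)
+//	cfg := user.Config() // use cfg to build clients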
+func (f *ControlPlane) Stop() error {
+ var errList []error
+
+ if f.APIServer != nil {
+ if err := f.APIServer.Stop(); err != nil {
+ errList = append(errList, err)
+ }
+ }
+ if f.Etcd != nil {
+ if err := f.Etcd.Stop(); err != nil {
+ errList = append(errList, err)
+ }
+ }
+
+ return kerrors.NewAggregate(errList)
+}
+
+// APIURL returns the URL you should connect to to talk to your API server.
+//
+// If insecure serving is configured, this will contain the insecure port.
+// Otherwise, it will contain the secure port.
+//
+// Deprecated: use AddUser instead, or APIServer.{Ins|S}ecureServing.URL if
+// you really want just the URL.
+func (f *ControlPlane) APIURL() *url.URL {
+ return f.APIServer.URL
+}
+
+// KubeCtl returns a pre-configured KubeCtl, ready to connect to this
+// ControlPlane.
+//
+// Deprecated: use AddUser & AuthenticatedUser.Kubectl instead.
+func (f *ControlPlane) KubeCtl() *KubeCtl {
+ return f.defaultUserKubectl
+}
+
+// RESTClientConfig returns a pre-configured restconfig, ready to connect to
+// this ControlPlane.
+//
+// Deprecated: use AddUser & AuthenticatedUser.Config instead.
+func (f *ControlPlane) RESTClientConfig() (*rest.Config, error) {
+ return f.defaultUserCfg, nil
+}
+
+// AuthenticatedUser contains access information for a provisioned user,
+// including REST config, kubeconfig contents, and access to a KubeCtl instance.
+//
+// It's not "safe" to use the methods on this till after the API server has been
+// started (due to certificate initialization and such). The various methods will
+// panic if this is done.
+type AuthenticatedUser struct {
+ // cfg is the rest.Config for connecting to the API server. It's lazily initialized.
+ cfg *rest.Config
+ // cfgIsComplete indicates the cfg has had late-initialized fields (e.g.
+ // API server CA data) initialized.
+ cfgIsComplete bool
+
+ // plane is a handle to the ControlPlane (and thus the APIServer) that's used
+ // when finalizing cfg and producing the kubectl instance.
+ plane *ControlPlane
+
+ // kubectl is our existing, provisioned kubectl. We don't provision one
+ // till someone actually asks for it.
+ kubectl *KubeCtl
+}
+
+// Config returns the REST config that can be used to connect to the API server
+// as this user.
+//
+// Will panic if used before the API server is started.
+func (u *AuthenticatedUser) Config() *rest.Config {
+ // NB(directxman12): we choose to panic here for ergonomics sake, and because there's
+ // not really much you can do to "handle" this error. This machinery is intended to be
+ // used in tests anyway, so panicking is not a particularly big deal.
+ if u.cfgIsComplete {
+ return u.cfg
+ }
+ if len(u.plane.APIServer.SecureServing.CA) == 0 {
+ panic("the API server has not yet been started, please do that before accessing connection details")
+ }
+
+ u.cfg.CAData = u.plane.APIServer.SecureServing.CA
+ u.cfg.Host = u.plane.APIServer.SecureServing.URL("https", "/").String()
+ u.cfgIsComplete = true
+ return u.cfg
+}
+
+// KubeConfig returns a KubeConfig that's roughly equivalent to this user's REST config.
+//
+// Will panic if used before the API server is started.
+func (u AuthenticatedUser) KubeConfig() ([]byte, error) {
+ // NB(directxman12): we don't return the actual API object to avoid yet another
+ // piece of kubernetes API in our public API, and also because generally the thing
+ // you want to do with this is just write it out to a file for external debugging
+ // purposes, etc.
+ return KubeConfigFromREST(u.Config()) +} + +// Kubectl returns a KubeCtl instance for talking to the API server as this user. It uses +// a kubeconfig equivalent to that returned by .KubeConfig. +// +// Will panic if used before the API server is started. +func (u *AuthenticatedUser) Kubectl() (*KubeCtl, error) { + if u.kubectl != nil { + return u.kubectl, nil + } + if len(u.plane.APIServer.CertDir) == 0 { + panic("the API server has not yet been started, please do that before accessing connection details") + } + + // cleaning this up is handled when our tmpDir is deleted + out, err := os.CreateTemp(u.plane.APIServer.CertDir, "*.kubecfg") + if err != nil { + return nil, fmt.Errorf("unable to create file for kubeconfig: %w", err) + } + defer out.Close() + contents, err := KubeConfigFromREST(u.Config()) + if err != nil { + return nil, err + } + if _, err := out.Write(contents); err != nil { + return nil, fmt.Errorf("unable to write kubeconfig to disk at %s: %w", out.Name(), err) + } + k := &KubeCtl{ + Path: u.plane.KubectlPath, + } + k.Opts = append(k.Opts, fmt.Sprintf("--kubeconfig=%s", out.Name())) + u.kubectl = k + return k, nil +} + +// AddUser provisions a new user in the cluster. It uses the APIServer's authentication +// strategy -- see APIServer.SecureServing.Authn. +// +// Unlike AddUser, it's safe to pass a nil rest.Config here if you have no +// particular opinions about the config. +// +// The default authentication strategy is not guaranteed to any specific strategy, but it is +// guaranteed to be callable both before and after Start has been called (but, as noted in the +// AuthenticatedUser docs, the given user objects are only valid after Start has been called). +func (f *ControlPlane) AddUser(user User, baseConfig *rest.Config) (*AuthenticatedUser, error) { + if f.GetAPIServer().SecureServing.Authn == nil { + return nil, fmt.Errorf("no API server authentication is configured yet. The API server defaults one when Start is called, did you mean to use that?") + } + + if baseConfig == nil { + baseConfig = &rest.Config{} + } + cfg, err := f.GetAPIServer().SecureServing.AddUser(user, baseConfig) + if err != nil { + return nil, err + } + + return &AuthenticatedUser{ + cfg: cfg, + plane: f, + }, nil +} + +// GetAPIServer returns this ControlPlane's APIServer, initializing it if necessary. +func (f *ControlPlane) GetAPIServer() *APIServer { + if f.APIServer == nil { + f.APIServer = &APIServer{} + } + return f.APIServer +} diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/internal/testing/process/arguments.go b/vendor/sigs.k8s.io/controller-runtime/pkg/internal/testing/process/arguments.go new file mode 100644 index 000000000..6c2c91e14 --- /dev/null +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/internal/testing/process/arguments.go @@ -0,0 +1,340 @@ +/* +Copyright 2021 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package process + +import ( + "bytes" + "html/template" + "sort" + "strings" +) + +// RenderTemplates returns an []string to render the templates +// +// Deprecated: will be removed in favor of Arguments. +func RenderTemplates(argTemplates []string, data interface{}) (args []string, err error) { + var t *template.Template + + for _, arg := range argTemplates { + t, err = template.New(arg).Parse(arg) + if err != nil { + args = nil + return + } + + buf := &bytes.Buffer{} + err = t.Execute(buf, data) + if err != nil { + args = nil + return + } + args = append(args, buf.String()) + } + + return +} + +// SliceToArguments converts a slice of arguments to structured arguments, +// appending each argument that starts with `--` and contains an `=` to the +// argument set (ignoring defaults), returning the rest. +// +// Deprecated: will be removed when RenderTemplates is removed. +func SliceToArguments(sliceArgs []string, args *Arguments) []string { + var rest []string + for i, arg := range sliceArgs { + if arg == "--" { + rest = append(rest, sliceArgs[i:]...) + return rest + } + // skip non-flag arguments, skip arguments w/o equals because we + // can't tell if the next argument should take a value + if !strings.HasPrefix(arg, "--") || !strings.Contains(arg, "=") { + rest = append(rest, arg) + continue + } + + parts := strings.SplitN(arg[2:], "=", 2) + name := parts[0] + val := parts[1] + + args.AppendNoDefaults(name, val) + } + + return rest +} + +// TemplateDefaults specifies defaults to be used for joining structured arguments with templates. +// +// Deprecated: will be removed when RenderTemplates is removed. +type TemplateDefaults struct { + // Data will be used to render the template. + Data interface{} + // Defaults will be used to default structured arguments if no template is passed. + Defaults map[string][]string + // MinimalDefaults will be used to default structured arguments if a template is passed. + // Use this for flags which *must* be present. + MinimalDefaults map[string][]string // for api server service-cluster-ip-range +} + +// TemplateAndArguments joins structured arguments and non-structured arguments, preserving existing +// behavior. Namely: +// +// 1. if templ has len > 0, it will be rendered against data +// 2. the rendered template values that look like `--foo=bar` will be split +// and appended to args, the rest will be kept around +// 3. the given args will be rendered as string form. If a template is given, +// no defaults will be used, otherwise defaults will be used +// 4. a result of [args..., rest...] will be returned +// +// It returns the resulting rendered arguments, plus the arguments that were +// not transferred to `args` during rendering. +// +// Deprecated: will be removed when RenderTemplates is removed. 
+func TemplateAndArguments(templ []string, args *Arguments, data TemplateDefaults) (allArgs []string, nonFlagishArgs []string, err error) { + if len(templ) == 0 { // 3 & 4 (no template case) + return args.AsStrings(data.Defaults), nil, nil + } + + // 1: render the template + rendered, err := RenderTemplates(templ, data.Data) + if err != nil { + return nil, nil, err + } + + // 2: filter out structured args and add them to args + rest := SliceToArguments(rendered, args) + + // 3 (template case): render structured args, no defaults (matching the + // legacy case where if Args was specified, no defaults were used) + res := args.AsStrings(data.MinimalDefaults) + + // 4: return the rendered structured args + all non-structured args + return append(res, rest...), rest, nil +} + +// EmptyArguments constructs an empty set of flags with no defaults. +func EmptyArguments() *Arguments { + return &Arguments{ + values: make(map[string]Arg), + } +} + +// Arguments are structured, overridable arguments. +// Each Arguments object contains some set of default arguments, which may +// be appended to, or overridden. +// +// When ready, you can serialize them to pass to exec.Command and friends using +// AsStrings. +// +// All flag-setting methods return the *same* instance of Arguments so that you +// can chain calls. +type Arguments struct { + // values contains the user-set values for the arguments. + // `values[key] = dontPass` means "don't pass this flag" + // `values[key] = passAsName` means "pass this flag without args like --key` + // `values[key] = []string{a, b, c}` means "--key=a --key=b --key=c` + // any values not explicitly set here will be copied from defaults on final rendering. + values map[string]Arg +} + +// Arg is an argument that has one or more values, +// and optionally falls back to default values. +type Arg interface { + // Append adds new values to this argument, returning + // a new instance contain the new value. The intermediate + // argument should generally be assumed to be consumed. + Append(vals ...string) Arg + // Get returns the full set of values, optionally including + // the passed in defaults. If it returns nil, this will be + // skipped. If it returns a non-nil empty slice, it'll be + // assumed that the argument should be passed as name-only. + Get(defaults []string) []string +} + +type userArg []string + +func (a userArg) Append(vals ...string) Arg { + return userArg(append(a, vals...)) //nolint:unconvert +} +func (a userArg) Get(_ []string) []string { + return []string(a) +} + +type defaultedArg []string + +func (a defaultedArg) Append(vals ...string) Arg { + return defaultedArg(append(a, vals...)) //nolint:unconvert +} +func (a defaultedArg) Get(defaults []string) []string { + res := append([]string(nil), defaults...) + return append(res, a...) +} + +type dontPassArg struct{} + +func (a dontPassArg) Append(vals ...string) Arg { + return userArg(vals) +} +func (dontPassArg) Get(_ []string) []string { + return nil +} + +type passAsNameArg struct{} + +func (a passAsNameArg) Append(_ ...string) Arg { + return passAsNameArg{} +} +func (passAsNameArg) Get(_ []string) []string { + return []string{} +} + +var ( + // DontPass indicates that the given argument will not actually be + // rendered. + DontPass Arg = dontPassArg{} + // PassAsName indicates that the given flag will be passed as `--key` + // without any value. 
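+ //
+ // For example (a sketch; SetRaw is defined later in this file):
+ //
+ //	args.SetRaw("profiling", PassAsName)   // rendered as just `--profiling`
+ //	args.SetRaw("insecure-port", DontPass) // suppressed entirely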
+ PassAsName Arg = passAsNameArg{} +) + +// AsStrings serializes this set of arguments to a slice of strings appropriate +// for passing to exec.Command and friends, making use of the given defaults +// as indicated for each particular argument. +// +// - Any flag in defaults that's not in Arguments will be present in the output +// - Any flag that's present in Arguments will be passed the corresponding +// defaults to do with as it will (ignore, append-to, suppress, etc). +func (a *Arguments) AsStrings(defaults map[string][]string) []string { + // sort for deterministic ordering + keysInOrder := make([]string, 0, len(defaults)+len(a.values)) + for key := range defaults { + if _, userSet := a.values[key]; userSet { + continue + } + keysInOrder = append(keysInOrder, key) + } + for key := range a.values { + keysInOrder = append(keysInOrder, key) + } + sort.Strings(keysInOrder) + + var res []string + for _, key := range keysInOrder { + vals := a.Get(key).Get(defaults[key]) + switch { + case vals == nil: // don't pass + continue + case len(vals) == 0: // pass as name + res = append(res, "--"+key) + default: + for _, val := range vals { + res = append(res, "--"+key+"="+val) + } + } + } + + return res +} + +// Get returns the value of the given flag. If nil, +// it will not be passed in AsString, otherwise: +// +// len == 0 --> `--key`, len > 0 --> `--key=val1 --key=val2 ...`. +func (a *Arguments) Get(key string) Arg { + if vals, ok := a.values[key]; ok { + return vals + } + return defaultedArg(nil) +} + +// Enable configures the given key to be passed as a "name-only" flag, +// like, `--key`. +func (a *Arguments) Enable(key string) *Arguments { + a.values[key] = PassAsName + return a +} + +// Disable prevents this flag from be passed. +func (a *Arguments) Disable(key string) *Arguments { + a.values[key] = DontPass + return a +} + +// Append adds additional values to this flag. If this flag has +// yet to be set, initial values will include defaults. If you want +// to intentionally ignore defaults/start from scratch, call AppendNoDefaults. +// +// Multiple values will look like `--key=value1 --key=value2 ...`. +func (a *Arguments) Append(key string, values ...string) *Arguments { + vals, present := a.values[key] + if !present { + vals = defaultedArg{} + } + a.values[key] = vals.Append(values...) + return a +} + +// AppendNoDefaults adds additional values to this flag. However, +// unlike Append, it will *not* copy values from defaults. +func (a *Arguments) AppendNoDefaults(key string, values ...string) *Arguments { + vals, present := a.values[key] + if !present { + vals = userArg{} + } + a.values[key] = vals.Append(values...) + return a +} + +// Set resets the given flag to the specified values, ignoring any existing +// values or defaults. +func (a *Arguments) Set(key string, values ...string) *Arguments { + a.values[key] = userArg(values) + return a +} + +// SetRaw sets the given flag to the given Arg value directly. Use this if +// you need to do some complicated deferred logic or something. +// +// Otherwise behaves like Set. +func (a *Arguments) SetRaw(key string, val Arg) *Arguments { + a.values[key] = val + return a +} + +// FuncArg is a basic implementation of Arg that can be used for custom argument logic, +// like pulling values out of APIServer, or dynamically calculating values just before +// launch. +// +// The given function will be mapped directly to Arg#Get, and will generally be +// used in conjunction with SetRaw. 
For example, to set `--some-flag` to the
+// API server's CertDir, you could do:
+//
+//	server.Configure().SetRaw("--some-flag", FuncArg(func(defaults []string) []string {
+//		return []string{server.CertDir}
+//	}))
+//
+// FuncArg ignores Appends; if you need to support appending values too, consider implementing
+// Arg directly.
+type FuncArg func([]string) []string
+
+// Append is a no-op for FuncArg, and just returns itself.
+func (a FuncArg) Append(vals ...string) Arg { return a }
+
+// Get delegates functionality to the FuncArg function itself.
+func (a FuncArg) Get(defaults []string) []string {
+ return a(defaults)
+}
diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/internal/testing/process/bin_path_finder.go b/vendor/sigs.k8s.io/controller-runtime/pkg/internal/testing/process/bin_path_finder.go
new file mode 100644
index 000000000..e1428aa6e
--- /dev/null
+++ b/vendor/sigs.k8s.io/controller-runtime/pkg/internal/testing/process/bin_path_finder.go
@@ -0,0 +1,70 @@
+/*
+Copyright 2021 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package process
+
+import (
+ "os"
+ "path/filepath"
+ "regexp"
+ "strings"
+)
+
+const (
+ // EnvAssetsPath is the environment variable that stores the global test
+ // binary location override.
+ EnvAssetsPath = "KUBEBUILDER_ASSETS"
+ // EnvAssetOverridePrefix is the environment variable prefix for per-binary
+ // location overrides.
+ EnvAssetOverridePrefix = "TEST_ASSET_"
+ // AssetsDefaultPath is the default location to look for test binaries in,
+ // if no override was provided.
+ AssetsDefaultPath = "/usr/local/kubebuilder/bin"
+)
+
+// BinPathFinder finds the path to the given named binary, using the following locations
+// in order of precedence (highest first). Notice that the various env vars only need
+// to be set -- the asset is not checked for existence on the filesystem.
+//
+// 1. TEST_ASSET_{tr/a-z-/A-Z_/} (if set; asset overrides -- EnvAssetOverridePrefix)
+// 2. KUBEBUILDER_ASSETS (if set; global asset path -- EnvAssetsPath)
+// 3. assetDirectory (if set; per-config asset directory)
+// 4. /usr/local/kubebuilder/bin (AssetsDefaultPath).
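+//
+// For example (a sketch; the path is hypothetical): with
+// TEST_ASSET_ETCD=/opt/assets/etcd exported, BinPathFinder("etcd", "")
+// returns "/opt/assets/etcd", regardless of whether that file exists.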
+func BinPathFinder(symbolicName, assetDirectory string) (binPath string) { + punctuationPattern := regexp.MustCompile("[^A-Z0-9]+") + sanitizedName := punctuationPattern.ReplaceAllString(strings.ToUpper(symbolicName), "_") + leadingNumberPattern := regexp.MustCompile("^[0-9]+") + sanitizedName = leadingNumberPattern.ReplaceAllString(sanitizedName, "") + envVar := EnvAssetOverridePrefix + sanitizedName + + // TEST_ASSET_XYZ + if val, ok := os.LookupEnv(envVar); ok { + return val + } + + // KUBEBUILDER_ASSETS + if val, ok := os.LookupEnv(EnvAssetsPath); ok { + return filepath.Join(val, symbolicName) + } + + // assetDirectory + if assetDirectory != "" { + return filepath.Join(assetDirectory, symbolicName) + } + + // default path + return filepath.Join(AssetsDefaultPath, symbolicName) +} diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/internal/testing/process/process.go b/vendor/sigs.k8s.io/controller-runtime/pkg/internal/testing/process/process.go new file mode 100644 index 000000000..531021bb2 --- /dev/null +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/internal/testing/process/process.go @@ -0,0 +1,277 @@ +/* +Copyright 2021 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package process + +import ( + "crypto/tls" + "fmt" + "io" + "io/ioutil" + "net" + "net/http" + "net/url" + "os" + "os/exec" + "path" + "regexp" + "sync" + "syscall" + "time" +) + +// ListenAddr represents some listening address and port. +type ListenAddr struct { + Address string + Port string +} + +// URL returns a URL for this address with the given scheme and subpath. +func (l *ListenAddr) URL(scheme string, path string) *url.URL { + return &url.URL{ + Scheme: scheme, + Host: l.HostPort(), + Path: path, + } +} + +// HostPort returns the joined host-port pair for this address. +func (l *ListenAddr) HostPort() string { + return net.JoinHostPort(l.Address, l.Port) +} + +// HealthCheck describes the information needed to health-check a process via +// some health-check URL. +type HealthCheck struct { + url.URL + + // HealthCheckPollInterval is the interval which will be used for polling the + // endpoint described by Host, Port, and Path. + // + // If left empty it will default to 100 Milliseconds. + PollInterval time.Duration +} + +// State define the state of the process. +type State struct { + Cmd *exec.Cmd + + // HealthCheck describes how to check if this process is up. If we get an http.StatusOK, + // we assume the process is ready to operate. + // + // For example, the /healthz endpoint of the k8s API server, or the /health endpoint of etcd. + HealthCheck HealthCheck + + Args []string + + StopTimeout time.Duration + StartTimeout time.Duration + + Dir string + DirNeedsCleaning bool + Path string + + // ready holds whether the process is currently in ready state (hit the ready condition) or not. 
+ // It will be set to true on a successful `Start()` and set to false on a successful `Stop()` + ready bool + + // waitDone is closed when our call to wait finishes up, and indicates that + // our process has terminated. + waitDone chan struct{} + errMu sync.Mutex + exitErr error + exited bool +} + +// Init sets up this process, configuring binary paths if missing, initializing +// temporary directories, etc. +// +// This defaults all defaultable fields. +func (ps *State) Init(name string) error { + if ps.Path == "" { + if name == "" { + return fmt.Errorf("must have at least one of name or path") + } + ps.Path = BinPathFinder(name, "") + } + + if ps.Dir == "" { + newDir, err := ioutil.TempDir("", "k8s_test_framework_") + if err != nil { + return err + } + ps.Dir = newDir + ps.DirNeedsCleaning = true + } + + if ps.StartTimeout == 0 { + ps.StartTimeout = 20 * time.Second + } + + if ps.StopTimeout == 0 { + ps.StopTimeout = 20 * time.Second + } + return nil +} + +type stopChannel chan struct{} + +// CheckFlag checks the help output of this command for the presence of the given flag, specified +// without the leading `--` (e.g. `CheckFlag("insecure-port")` checks for `--insecure-port`), +// returning true if the flag is present. +func (ps *State) CheckFlag(flag string) (bool, error) { + cmd := exec.Command(ps.Path, "--help") + outContents, err := cmd.CombinedOutput() + if err != nil { + return false, fmt.Errorf("unable to run command %q to check for flag %q: %w", ps.Path, flag, err) + } + pat := `(?m)^\s*--` + flag + `\b` // (m --> multi-line --> ^ matches start of line) + matched, err := regexp.Match(pat, outContents) + if err != nil { + return false, fmt.Errorf("unable to check command %q for flag %q in help output: %w", ps.Path, flag, err) + } + return matched, nil +} + +// Start starts the apiserver, waits for it to come up, and returns an error, +// if occurred. +func (ps *State) Start(stdout, stderr io.Writer) (err error) { + if ps.ready { + return nil + } + + ps.Cmd = exec.Command(ps.Path, ps.Args...) + ps.Cmd.Stdout = stdout + ps.Cmd.Stderr = stderr + + ready := make(chan bool) + timedOut := time.After(ps.StartTimeout) + pollerStopCh := make(stopChannel) + go pollURLUntilOK(ps.HealthCheck.URL, ps.HealthCheck.PollInterval, ready, pollerStopCh) + + ps.waitDone = make(chan struct{}) + + if err := ps.Cmd.Start(); err != nil { + ps.errMu.Lock() + defer ps.errMu.Unlock() + ps.exited = true + return err + } + go func() { + defer close(ps.waitDone) + err := ps.Cmd.Wait() + + ps.errMu.Lock() + defer ps.errMu.Unlock() + ps.exitErr = err + ps.exited = true + }() + + select { + case <-ready: + ps.ready = true + return nil + case <-ps.waitDone: + if pollerStopCh != nil { + close(pollerStopCh) + } + return fmt.Errorf("timeout waiting for process %s to start successfully "+ + "(it may have failed to start, or stopped unexpectedly before becoming ready)", + path.Base(ps.Path)) + case <-timedOut: + if pollerStopCh != nil { + close(pollerStopCh) + } + if ps.Cmd != nil { + // intentionally ignore this -- we might've crashed, failed to start, etc + ps.Cmd.Process.Signal(syscall.SIGTERM) //nolint:errcheck + } + return fmt.Errorf("timeout waiting for process %s to start", path.Base(ps.Path)) + } +} + +// Exited returns true if the process exited, and may also +// return an error (as per Cmd.Wait) if the process did not +// exit with error code 0. 
+func (ps *State) Exited() (bool, error) { + ps.errMu.Lock() + defer ps.errMu.Unlock() + return ps.exited, ps.exitErr +} + +func pollURLUntilOK(url url.URL, interval time.Duration, ready chan bool, stopCh stopChannel) { + client := &http.Client{ + Transport: &http.Transport{ + TLSClientConfig: &tls.Config{ + // there's probably certs *somewhere*, + // but it's fine to just skip validating + // them for health checks during testing + InsecureSkipVerify: true, //nolint:gosec + }, + }, + } + if interval <= 0 { + interval = 100 * time.Millisecond + } + for { + res, err := client.Get(url.String()) + if err == nil { + res.Body.Close() + if res.StatusCode == http.StatusOK { + ready <- true + return + } + } + + select { + case <-stopCh: + return + default: + time.Sleep(interval) + } + } +} + +// Stop stops this process gracefully, waits for its termination, and cleans up +// the CertDir if necessary. +func (ps *State) Stop() error { + // Always clear the directory if we need to. + defer func() { + if ps.DirNeedsCleaning { + _ = os.RemoveAll(ps.Dir) + } + }() + if ps.Cmd == nil { + return nil + } + if done, _ := ps.Exited(); done { + return nil + } + if err := ps.Cmd.Process.Signal(syscall.SIGTERM); err != nil { + return fmt.Errorf("unable to signal for process %s to stop: %w", ps.Path, err) + } + + timedOut := time.After(ps.StopTimeout) + + select { + case <-ps.waitDone: + break + case <-timedOut: + return fmt.Errorf("timeout waiting for process %s to stop", path.Base(ps.Path)) + } + ps.ready = false + return nil +} diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/log/deleg.go b/vendor/sigs.k8s.io/controller-runtime/pkg/log/deleg.go new file mode 100644 index 000000000..c15e73cff --- /dev/null +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/log/deleg.go @@ -0,0 +1,188 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package log + +import ( + "sync" + + "github.com/go-logr/logr" +) + +// loggerPromise knows how to populate a concrete logr.Logger +// with options, given an actual base logger later on down the line. +type loggerPromise struct { + logger *DelegatingLogSink + childPromises []*loggerPromise + promisesLock sync.Mutex + + name *string + tags []interface{} +} + +func (p *loggerPromise) WithName(l *DelegatingLogSink, name string) *loggerPromise { + res := &loggerPromise{ + logger: l, + name: &name, + promisesLock: sync.Mutex{}, + } + + p.promisesLock.Lock() + defer p.promisesLock.Unlock() + p.childPromises = append(p.childPromises, res) + return res +} + +// WithValues provides a new Logger with the tags appended. +func (p *loggerPromise) WithValues(l *DelegatingLogSink, tags ...interface{}) *loggerPromise { + res := &loggerPromise{ + logger: l, + tags: tags, + promisesLock: sync.Mutex{}, + } + + p.promisesLock.Lock() + defer p.promisesLock.Unlock() + p.childPromises = append(p.childPromises, res) + return res +} + +// Fulfill instantiates the Logger with the provided logger. 
+func (p *loggerPromise) Fulfill(parentLogSink logr.LogSink) { + sink := parentLogSink + if p.name != nil { + sink = sink.WithName(*p.name) + } + + if p.tags != nil { + sink = sink.WithValues(p.tags...) + } + + p.logger.lock.Lock() + p.logger.logger = sink + p.logger.promise = nil + p.logger.lock.Unlock() + + for _, childPromise := range p.childPromises { + childPromise.Fulfill(sink) + } +} + +// DelegatingLogSink is a logsink that delegates to another logr.LogSink. +// If the underlying promise is not nil, it registers calls to sub-loggers with +// the logging factory to be populated later, and returns a new delegating +// logger. It expects to have *some* logr.Logger set at all times (generally +// a no-op logger before the promises are fulfilled). +type DelegatingLogSink struct { + lock sync.RWMutex + logger logr.LogSink + promise *loggerPromise + info logr.RuntimeInfo +} + +// Init implements logr.LogSink. +func (l *DelegatingLogSink) Init(info logr.RuntimeInfo) { + l.lock.Lock() + defer l.lock.Unlock() + l.info = info +} + +// Enabled tests whether this Logger is enabled. For example, commandline +// flags might be used to set the logging verbosity and disable some info +// logs. +func (l *DelegatingLogSink) Enabled(level int) bool { + l.lock.RLock() + defer l.lock.RUnlock() + return l.logger.Enabled(level) +} + +// Info logs a non-error message with the given key/value pairs as context. +// +// The msg argument should be used to add some constant description to +// the log line. The key/value pairs can then be used to add additional +// variable information. The key/value pairs should alternate string +// keys and arbitrary values. +func (l *DelegatingLogSink) Info(level int, msg string, keysAndValues ...interface{}) { + l.lock.RLock() + defer l.lock.RUnlock() + l.logger.Info(level, msg, keysAndValues...) +} + +// Error logs an error, with the given message and key/value pairs as context. +// It functions similarly to calling Info with the "error" named value, but may +// have unique behavior, and should be preferred for logging errors (see the +// package documentations for more information). +// +// The msg field should be used to add context to any underlying error, +// while the err field should be used to attach the actual error that +// triggered this log line, if present. +func (l *DelegatingLogSink) Error(err error, msg string, keysAndValues ...interface{}) { + l.lock.RLock() + defer l.lock.RUnlock() + l.logger.Error(err, msg, keysAndValues...) +} + +// WithName provides a new Logger with the name appended. +func (l *DelegatingLogSink) WithName(name string) logr.LogSink { + l.lock.RLock() + defer l.lock.RUnlock() + + if l.promise == nil { + return l.logger.WithName(name) + } + + res := &DelegatingLogSink{logger: l.logger} + promise := l.promise.WithName(res, name) + res.promise = promise + + return res +} + +// WithValues provides a new Logger with the tags appended. +func (l *DelegatingLogSink) WithValues(tags ...interface{}) logr.LogSink { + l.lock.RLock() + defer l.lock.RUnlock() + + if l.promise == nil { + return l.logger.WithValues(tags...) + } + + res := &DelegatingLogSink{logger: l.logger} + promise := l.promise.WithValues(res, tags...) + res.promise = promise + + return res +} + +// Fulfill switches the logger over to use the actual logger +// provided, instead of the temporary initial one, if this method +// has not been previously called. 
+func (l *DelegatingLogSink) Fulfill(actual logr.LogSink) { + if l.promise != nil { + l.promise.Fulfill(actual) + } +} + +// NewDelegatingLogSink constructs a new DelegatingLogSink which uses +// the given logger before it's promise is fulfilled. +func NewDelegatingLogSink(initial logr.LogSink) *DelegatingLogSink { + l := &DelegatingLogSink{ + logger: initial, + promise: &loggerPromise{promisesLock: sync.Mutex{}}, + } + l.promise.logger = l + return l +} diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/log/log.go b/vendor/sigs.k8s.io/controller-runtime/pkg/log/log.go new file mode 100644 index 000000000..76950cc34 --- /dev/null +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/log/log.go @@ -0,0 +1,102 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Package log contains utilities for fetching a new logger +// when one is not already available. +// +// The Log Handle +// +// This package contains a root logr.Logger Log. It may be used to +// get a handle to whatever the root logging implementation is. By +// default, no implementation exists, and the handle returns "promises" +// to loggers. When the implementation is set using SetLogger, these +// "promises" will be converted over to real loggers. +// +// Logr +// +// All logging in controller-runtime is structured, using a set of interfaces +// defined by a package called logr +// (https://godoc.org/github.com/go-logr/logr). The sub-package zap provides +// helpers for setting up logr backed by Zap (go.uber.org/zap). +package log + +import ( + "context" + "sync" + "time" + + "github.com/go-logr/logr" +) + +// SetLogger sets a concrete logging implementation for all deferred Loggers. +func SetLogger(l logr.Logger) { + loggerWasSetLock.Lock() + defer loggerWasSetLock.Unlock() + + loggerWasSet = true + dlog.Fulfill(l.GetSink()) +} + +// It is safe to assume that if this wasn't set within the first 30 seconds of a binaries +// lifetime, it will never get set. The DelegatingLogSink causes a high number of memory +// allocations when not given an actual Logger, so we set a NullLogSink to avoid that. +// +// We need to keep the DelegatingLogSink because we have various inits() that get a logger from +// here. They will always get executed before any code that imports controller-runtime +// has a chance to run and hence to set an actual logger. +func init() { + // Init is blocking, so start a new goroutine + go func() { + time.Sleep(30 * time.Second) + loggerWasSetLock.Lock() + defer loggerWasSetLock.Unlock() + if !loggerWasSet { + dlog.Fulfill(NullLogSink{}) + } + }() +} + +var ( + loggerWasSetLock sync.Mutex + loggerWasSet bool +) + +// Log is the base logger used by kubebuilder. It delegates +// to another logr.Logger. You *must* call SetLogger to +// get any actual logging. If SetLogger is not called within +// the first 30 seconds of a binaries lifetime, it will get +// set to a NullLogSink. 
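+//
+// A wiring sketch (zap.New is provided by the log/zap sub-package mentioned
+// in the package documentation above; any logr.Logger works):
+//
+//	func main() {
+//		log.SetLogger(zap.New())
+//	}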
+var ( + dlog = NewDelegatingLogSink(NullLogSink{}) + Log = logr.New(dlog) +) + +// FromContext returns a logger with predefined values from a context.Context. +func FromContext(ctx context.Context, keysAndValues ...interface{}) logr.Logger { + log := Log + if ctx != nil { + if logger, err := logr.FromContext(ctx); err == nil { + log = logger + } + } + return log.WithValues(keysAndValues...) +} + +// IntoContext takes a context and sets the logger as one of its values. +// Use FromContext function to retrieve the logger. +func IntoContext(ctx context.Context, log logr.Logger) context.Context { + return logr.NewContext(ctx, log) +} diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/log/null.go b/vendor/sigs.k8s.io/controller-runtime/pkg/log/null.go new file mode 100644 index 000000000..f3e81074f --- /dev/null +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/log/null.go @@ -0,0 +1,59 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package log + +import ( + "github.com/go-logr/logr" +) + +// NB: this is the same as the null logger logr/testing, +// but avoids accidentally adding the testing flags to +// all binaries. + +// NullLogSink is a logr.Logger that does nothing. +type NullLogSink struct{} + +var _ logr.LogSink = NullLogSink{} + +// Init implements logr.LogSink. +func (log NullLogSink) Init(logr.RuntimeInfo) { +} + +// Info implements logr.InfoLogger. +func (NullLogSink) Info(_ int, _ string, _ ...interface{}) { + // Do nothing. +} + +// Enabled implements logr.InfoLogger. +func (NullLogSink) Enabled(level int) bool { + return false +} + +// Error implements logr.Logger. +func (NullLogSink) Error(_ error, _ string, _ ...interface{}) { + // Do nothing. +} + +// WithName implements logr.Logger. +func (log NullLogSink) WithName(_ string) logr.LogSink { + return log +} + +// WithValues implements logr.Logger. +func (log NullLogSink) WithValues(_ ...interface{}) logr.LogSink { + return log +} diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/log/warning_handler.go b/vendor/sigs.k8s.io/controller-runtime/pkg/log/warning_handler.go new file mode 100644 index 000000000..3012fdd41 --- /dev/null +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/log/warning_handler.go @@ -0,0 +1,76 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package log + +import ( + "sync" + + "github.com/go-logr/logr" +) + +// KubeAPIWarningLoggerOptions controls the behavior +// of a rest.WarningHandler constructed using NewKubeAPIWarningLogger(). 
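+//
+// A usage sketch (assumes cfg is a *rest.Config and logger is a logr.Logger):
+//
+//	cfg.WarningHandler = NewKubeAPIWarningLogger(logger, KubeAPIWarningLoggerOptions{Deduplicate: true})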
+type KubeAPIWarningLoggerOptions struct {
+ // Deduplicate indicates a given warning message should only be written once.
+ // Setting this to true in a long-running process handling many warnings can
+ // result in increased memory use.
+ Deduplicate bool
+}
+
+// KubeAPIWarningLogger is a wrapper around
+// a provided logr.Logger that implements the
+// rest.WarningHandler interface.
+type KubeAPIWarningLogger struct {
+ // logger is used to log responses with the warning header
+ logger logr.Logger
+ // opts contains options controlling warning output
+ opts KubeAPIWarningLoggerOptions
+ // writtenLock guards written
+ writtenLock sync.Mutex
+ // used to keep track of already logged messages
+ // and help in de-duplication.
+ written map[string]struct{}
+}
+
+// HandleWarningHeader handles logging for responses from the API server that
+// are warnings with code 299, using a logr.Logger for its logging purposes.
+func (l *KubeAPIWarningLogger) HandleWarningHeader(code int, agent string, message string) {
+ if code != 299 || len(message) == 0 {
+ return
+ }
+
+ if l.opts.Deduplicate {
+ l.writtenLock.Lock()
+ defer l.writtenLock.Unlock()
+
+ if _, alreadyLogged := l.written[message]; alreadyLogged {
+ return
+ }
+ l.written[message] = struct{}{}
+ }
+ l.logger.Info(message)
+}
+
+// NewKubeAPIWarningLogger returns an implementation of rest.WarningHandler that logs warnings
+// with code = 299 to the provided logr.Logger.
+func NewKubeAPIWarningLogger(l logr.Logger, opts KubeAPIWarningLoggerOptions) *KubeAPIWarningLogger {
+ h := &KubeAPIWarningLogger{logger: l, opts: opts}
+ if opts.Deduplicate {
+ h.written = map[string]struct{}{}
+ }
+ return h
+}
diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/webhook/conversion/conversion.go b/vendor/sigs.k8s.io/controller-runtime/pkg/webhook/conversion/conversion.go
new file mode 100644
index 000000000..a5b7a282c
--- /dev/null
+++ b/vendor/sigs.k8s.io/controller-runtime/pkg/webhook/conversion/conversion.go
@@ -0,0 +1,345 @@
+/*
+Copyright 2019 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+/*
+Package conversion provides the implementation for a CRD conversion webhook that handles version conversion requests for types that are convertible.
+
+See pkg/conversion for interface definitions required to ensure an API Type is convertible.
+*/
+package conversion
+
+import (
+ "encoding/json"
+ "fmt"
+ "net/http"
+
+ apix "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1"
+ "k8s.io/apimachinery/pkg/api/meta"
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ "k8s.io/apimachinery/pkg/runtime"
+ "k8s.io/apimachinery/pkg/runtime/schema"
+ "sigs.k8s.io/controller-runtime/pkg/conversion"
+ logf "sigs.k8s.io/controller-runtime/pkg/log"
+)
+
+var (
+ log = logf.Log.WithName("conversion-webhook")
+)
+
+// Webhook implements a CRD conversion webhook HTTP handler.
+type Webhook struct {
+ scheme *runtime.Scheme
+ decoder *Decoder
+}
+
+// InjectScheme injects a scheme into the webhook, in order to construct a Decoder.
+func (wh *Webhook) InjectScheme(s *runtime.Scheme) error { + var err error + wh.scheme = s + wh.decoder, err = NewDecoder(s) + if err != nil { + return err + } + + return nil +} + +// ensure Webhook implements http.Handler +var _ http.Handler = &Webhook{} + +func (wh *Webhook) ServeHTTP(w http.ResponseWriter, r *http.Request) { + convertReview := &apix.ConversionReview{} + err := json.NewDecoder(r.Body).Decode(convertReview) + if err != nil { + log.Error(err, "failed to read conversion request") + w.WriteHeader(http.StatusBadRequest) + return + } + + // TODO(droot): may be move the conversion logic to a separate module to + // decouple it from the http layer ? + resp, err := wh.handleConvertRequest(convertReview.Request) + if err != nil { + log.Error(err, "failed to convert", "request", convertReview.Request.UID) + convertReview.Response = errored(err) + } else { + convertReview.Response = resp + } + convertReview.Response.UID = convertReview.Request.UID + convertReview.Request = nil + + err = json.NewEncoder(w).Encode(convertReview) + if err != nil { + log.Error(err, "failed to write response") + return + } +} + +// handles a version conversion request. +func (wh *Webhook) handleConvertRequest(req *apix.ConversionRequest) (*apix.ConversionResponse, error) { + if req == nil { + return nil, fmt.Errorf("conversion request is nil") + } + var objects []runtime.RawExtension + + for _, obj := range req.Objects { + src, gvk, err := wh.decoder.Decode(obj.Raw) + if err != nil { + return nil, err + } + dst, err := wh.allocateDstObject(req.DesiredAPIVersion, gvk.Kind) + if err != nil { + return nil, err + } + err = wh.convertObject(src, dst) + if err != nil { + return nil, err + } + objects = append(objects, runtime.RawExtension{Object: dst}) + } + return &apix.ConversionResponse{ + UID: req.UID, + ConvertedObjects: objects, + Result: metav1.Status{ + Status: metav1.StatusSuccess, + }, + }, nil +} + +// convertObject will convert given a src object to dst object. 
+// Note(droot): couldn't find a way to reduce the cyclomatic complexity under 10 +// without compromising readability, so disabling gocyclo linter +func (wh *Webhook) convertObject(src, dst runtime.Object) error { + srcGVK := src.GetObjectKind().GroupVersionKind() + dstGVK := dst.GetObjectKind().GroupVersionKind() + + if srcGVK.GroupKind() != dstGVK.GroupKind() { + return fmt.Errorf("src %T and dst %T does not belong to same API Group", src, dst) + } + + if srcGVK == dstGVK { + return fmt.Errorf("conversion is not allowed between same type %T", src) + } + + srcIsHub, dstIsHub := isHub(src), isHub(dst) + srcIsConvertible, dstIsConvertible := isConvertible(src), isConvertible(dst) + + switch { + case srcIsHub && dstIsConvertible: + return dst.(conversion.Convertible).ConvertFrom(src.(conversion.Hub)) + case dstIsHub && srcIsConvertible: + return src.(conversion.Convertible).ConvertTo(dst.(conversion.Hub)) + case srcIsConvertible && dstIsConvertible: + return wh.convertViaHub(src.(conversion.Convertible), dst.(conversion.Convertible)) + default: + return fmt.Errorf("%T is not convertible to %T", src, dst) + } +} + +func (wh *Webhook) convertViaHub(src, dst conversion.Convertible) error { + hub, err := wh.getHub(src) + if err != nil { + return err + } + + if hub == nil { + return fmt.Errorf("%s does not have any Hub defined", src) + } + + err = src.ConvertTo(hub) + if err != nil { + return fmt.Errorf("%T failed to convert to hub version %T : %w", src, hub, err) + } + + err = dst.ConvertFrom(hub) + if err != nil { + return fmt.Errorf("%T failed to convert from hub version %T : %w", dst, hub, err) + } + + return nil +} + +// getHub returns an instance of the Hub for passed-in object's group/kind. +func (wh *Webhook) getHub(obj runtime.Object) (conversion.Hub, error) { + gvks, err := objectGVKs(wh.scheme, obj) + if err != nil { + return nil, err + } + if len(gvks) == 0 { + return nil, fmt.Errorf("error retrieving gvks for object : %v", obj) + } + + var hub conversion.Hub + var hubFoundAlready bool + for _, gvk := range gvks { + instance, err := wh.scheme.New(gvk) + if err != nil { + return nil, fmt.Errorf("failed to allocate an instance for gvk %v: %w", gvk, err) + } + if val, isHub := instance.(conversion.Hub); isHub { + if hubFoundAlready { + return nil, fmt.Errorf("multiple hub version defined for %T", obj) + } + hubFoundAlready = true + hub = val + } + } + return hub, nil +} + +// allocateDstObject returns an instance for a given GVK. +func (wh *Webhook) allocateDstObject(apiVersion, kind string) (runtime.Object, error) { + gvk := schema.FromAPIVersionAndKind(apiVersion, kind) + + obj, err := wh.scheme.New(gvk) + if err != nil { + return obj, err + } + + t, err := meta.TypeAccessor(obj) + if err != nil { + return obj, err + } + + t.SetAPIVersion(apiVersion) + t.SetKind(kind) + + return obj, nil +} + +// IsConvertible determines if given type is convertible or not. For a type +// to be convertible, the group-kind needs to have a Hub type defined and all +// non-hub types must be able to convert to/from Hub. 
+func IsConvertible(scheme *runtime.Scheme, obj runtime.Object) (bool, error) { + var hubs, spokes, nonSpokes []runtime.Object + + gvks, err := objectGVKs(scheme, obj) + if err != nil { + return false, err + } + if len(gvks) == 0 { + return false, fmt.Errorf("error retrieving gvks for object : %v", obj) + } + + for _, gvk := range gvks { + instance, err := scheme.New(gvk) + if err != nil { + return false, fmt.Errorf("failed to allocate an instance for gvk %v: %w", gvk, err) + } + + if isHub(instance) { + hubs = append(hubs, instance) + continue + } + + if !isConvertible(instance) { + nonSpokes = append(nonSpokes, instance) + continue + } + + spokes = append(spokes, instance) + } + + if len(gvks) == 1 { + return false, nil // single version + } + + if len(hubs) == 0 && len(spokes) == 0 { + // multiple version detected with no conversion implementation. This is + // true for multi-version built-in types. + return false, nil + } + + if len(hubs) == 1 && len(nonSpokes) == 0 { // convertible + return true, nil + } + + return false, PartialImplementationError{ + hubs: hubs, + nonSpokes: nonSpokes, + spokes: spokes, + } +} + +// objectGVKs returns all (Group,Version,Kind) for the Group/Kind of given object. +func objectGVKs(scheme *runtime.Scheme, obj runtime.Object) ([]schema.GroupVersionKind, error) { + // NB: we should not use `obj.GetObjectKind().GroupVersionKind()` to get the + // GVK here, since it is parsed from apiVersion and kind fields and it may + // return empty GVK if obj is an uninitialized object. + objGVKs, _, err := scheme.ObjectKinds(obj) + if err != nil { + return nil, err + } + if len(objGVKs) != 1 { + return nil, fmt.Errorf("expect to get only one GVK for %v", obj) + } + objGVK := objGVKs[0] + knownTypes := scheme.AllKnownTypes() + + var gvks []schema.GroupVersionKind + for gvk := range knownTypes { + if objGVK.GroupKind() == gvk.GroupKind() { + gvks = append(gvks, gvk) + } + } + return gvks, nil +} + +// PartialImplementationError represents an error due to partial conversion +// implementation such as hub without spokes, multiple hubs or spokes without hub. +type PartialImplementationError struct { + gvk schema.GroupVersionKind + hubs []runtime.Object + nonSpokes []runtime.Object + spokes []runtime.Object +} + +func (e PartialImplementationError) Error() string { + if len(e.hubs) == 0 { + return fmt.Sprintf("no hub defined for gvk %s", e.gvk) + } + if len(e.hubs) > 1 { + return fmt.Sprintf("multiple(%d) hubs defined for group-kind '%s' ", + len(e.hubs), e.gvk.GroupKind()) + } + if len(e.nonSpokes) > 0 { + return fmt.Sprintf("%d inconvertible types detected for group-kind '%s'", + len(e.nonSpokes), e.gvk.GroupKind()) + } + return "" +} + +// isHub determines if passed-in object is a Hub or not. +func isHub(obj runtime.Object) bool { + _, yes := obj.(conversion.Hub) + return yes +} + +// isConvertible determines if passed-in object is a convertible. +func isConvertible(obj runtime.Object) bool { + _, yes := obj.(conversion.Convertible) + return yes +} + +// helper to construct error response. 
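The hub-and-spoke rules that `convertObject` and `IsConvertible` enforce are easiest to see on a concrete type pair. A minimal sketch with hypothetical `ExampleV1` (hub) and `ExampleV1alpha1` (spoke) types, not part of this repository; the deep-copy methods are hand-written here where they would normally be generated:

```go
package example

import (
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/runtime"

	"sigs.k8s.io/controller-runtime/pkg/conversion"
)

// ExampleV1 is the storage ("hub") version of a hypothetical CRD.
type ExampleV1 struct {
	metav1.TypeMeta `json:",inline"`
	Data            string `json:"data"`
}

// Hub marks ExampleV1 as the hub; all spokes convert to and from it.
func (*ExampleV1) Hub() {}

// DeepCopyObject implements runtime.Object (normally generated by deepcopy-gen).
func (in *ExampleV1) DeepCopyObject() runtime.Object { out := *in; return &out }

// ExampleV1alpha1 is an older ("spoke") version of the same kind.
type ExampleV1alpha1 struct {
	metav1.TypeMeta `json:",inline"`
	Payload         string `json:"payload"`
}

// DeepCopyObject implements runtime.Object (normally generated by deepcopy-gen).
func (in *ExampleV1alpha1) DeepCopyObject() runtime.Object { out := *in; return &out }

// ConvertTo converts this spoke into the hub version.
func (in *ExampleV1alpha1) ConvertTo(dst conversion.Hub) error {
	dst.(*ExampleV1).Data = in.Payload
	return nil
}

// ConvertFrom populates this spoke from the hub version.
func (in *ExampleV1alpha1) ConvertFrom(src conversion.Hub) error {
	in.Payload = src.(*ExampleV1).Data
	return nil
}

// Compile-time checks mirroring what the isHub/isConvertible helpers detect.
var (
	_ conversion.Hub         = &ExampleV1{}
	_ conversion.Convertible = &ExampleV1alpha1{}
)
```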
+func errored(err error) *apix.ConversionResponse { + return &apix.ConversionResponse{ + Result: metav1.Status{ + Status: metav1.StatusFailure, + Message: err.Error(), + }, + } +} diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/webhook/conversion/decoder.go b/vendor/sigs.k8s.io/controller-runtime/pkg/webhook/conversion/decoder.go new file mode 100644 index 000000000..6a9e9c236 --- /dev/null +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/webhook/conversion/decoder.go @@ -0,0 +1,47 @@ +/* +Copyright 2021 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package conversion + +import ( + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/apimachinery/pkg/runtime/serializer" +) + +// Decoder knows how to decode the contents of a CRD version conversion +// request into a concrete object. +// TODO(droot): consider reusing decoder from admission pkg for this. +type Decoder struct { + codecs serializer.CodecFactory +} + +// NewDecoder creates a Decoder given the runtime.Scheme +func NewDecoder(scheme *runtime.Scheme) (*Decoder, error) { + return &Decoder{codecs: serializer.NewCodecFactory(scheme)}, nil +} + +// Decode decodes the inlined object. +func (d *Decoder) Decode(content []byte) (runtime.Object, *schema.GroupVersionKind, error) { + deserializer := d.codecs.UniversalDeserializer() + return deserializer.Decode(content, nil, nil) +} + +// DecodeInto decodes the inlined object in the into the passed-in runtime.Object. 
+func (d *Decoder) DecodeInto(content []byte, into runtime.Object) error { + deserializer := d.codecs.UniversalDeserializer() + return runtime.DecodeInto(deserializer, content, into) +} From 6c779c670ae6b43705b380307c98fe690cf14fcf Mon Sep 17 00:00:00 2001 From: Martin Weindel Date: Tue, 19 Jul 2022 15:48:55 +0200 Subject: [PATCH 6/7] changes after review with mandelsoft --- pkg/controller/provider/aws/aliastarget.go | 5 +- pkg/controller/provider/aws/execution.go | 20 +- pkg/controller/provider/aws/handler.go | 24 +- .../provider/azure-private/execution.go | 12 +- pkg/controller/provider/azure/execution.go | 12 +- pkg/controller/provider/google/execution.go | 21 +- .../provider/openstack/execution.go | 10 +- pkg/controller/provider/remote/handler.go | 3 +- pkg/controller/source/ingress/handler.go | 2 +- pkg/dns/dnsset.go | 50 +- pkg/dns/mapping.go | 9 +- pkg/dns/mapping_test.go | 2 +- pkg/dns/provider/changemodel.go | 94 +-- pkg/dns/provider/dedicatedrecord.go | 2 +- pkg/dns/provider/entry.go | 30 +- pkg/dns/provider/inmemory.go | 3 +- pkg/dns/provider/raw/execution.go | 6 +- pkg/dns/provider/raw/records.go | 2 +- pkg/dns/provider/state_zone.go | 2 +- pkg/dns/records.go | 28 +- pkg/dns/source/defaults.go | 2 +- pkg/dns/source/interface.go | 2 +- pkg/server/remote/common/remote.pb.go | 574 +++++++++--------- pkg/server/remote/common/remote.proto | 13 +- pkg/server/remote/conversion/conversion.go | 20 +- .../remote/conversion/conversion_test.go | 28 +- 26 files changed, 492 insertions(+), 484 deletions(-) diff --git a/pkg/controller/provider/aws/aliastarget.go b/pkg/controller/provider/aws/aliastarget.go index 1e1749813..6ce84283b 100644 --- a/pkg/controller/provider/aws/aliastarget.go +++ b/pkg/controller/provider/aws/aliastarget.go @@ -94,11 +94,10 @@ func buildRecordSetFromAliasTarget(r *route53.ResourceRecordSet) *dns.RecordSet rs := dns.NewRecordSet(dns.RS_ALIAS, 0, nil) rs.IgnoreTTL = true // alias target has no settable TTL rs.Add(&dns.Record{Value: dns.NormalizeHostname(aws.StringValue(r.AliasTarget.DNSName))}) - rs.RoutingPolicy = extractRoutingPolicy(r) return rs } -func buildResourceRecordSetForAliasTarget(name dns.DNSSetName, rset *dns.RecordSet) (*route53.ResourceRecordSet, error) { +func buildResourceRecordSetForAliasTarget(name dns.DNSSetName, policy *dns.RoutingPolicy, rset *dns.RecordSet) (*route53.ResourceRecordSet, error) { target := dns.NormalizeHostname(rset.Records[0].Value) hostedZone := canonicalHostedZone(target) if hostedZone == "" { @@ -115,7 +114,7 @@ func buildResourceRecordSetForAliasTarget(name dns.DNSSetName, rset *dns.RecordS Type: aws.String(route53.RRTypeA), AliasTarget: aliasTarget, } - if err := addRoutingPolicy(rrset, name, rset.RoutingPolicy); err != nil { + if err := addRoutingPolicy(rrset, name, policy); err != nil { return nil, err } return rrset, nil diff --git a/pkg/controller/provider/aws/execution.go b/pkg/controller/provider/aws/execution.go index 7ff1c0af6..7b22ede38 100644 --- a/pkg/controller/provider/aws/execution.go +++ b/pkg/controller/provider/aws/execution.go @@ -59,7 +59,7 @@ func NewExecution(logger logger.LogContext, h *Handler, zone provider.DNSHostedZ } } -func buildResourceRecordSet(name dns.DNSSetName, rset *dns.RecordSet) (*route53.ResourceRecordSet, error) { +func buildResourceRecordSet(name dns.DNSSetName, policy *dns.RoutingPolicy, rset *dns.RecordSet) (*route53.ResourceRecordSet, error) { rrs := &route53.ResourceRecordSet{} rrs.Name = aws.String(name.DNSName) rrs.Type = aws.String(rset.Type) @@ -70,14 +70,14 @@ func 
buildResourceRecordSet(name dns.DNSSetName, rset *dns.RecordSet) (*route53. Value: aws.String(r.Value), } } - if err := addRoutingPolicy(rrs, name, rset.RoutingPolicy); err != nil { + if err := addRoutingPolicy(rrs, name, policy); err != nil { return nil, err } return rrs, nil } func (this *Execution) addChange(action string, req *provider.ChangeRequest, dnsset *dns.DNSSet) error { - name, rset := dns.MapToProviderEx(req.Type, dnsset, this.zone.Domain(), req.RoutingPolicy) + name, rset := dns.MapToProvider(req.Type, dnsset, this.zone.Domain()) name = name.Align() if len(rset.Records) == 0 { return nil @@ -86,10 +86,16 @@ func (this *Execution) addChange(action string, req *provider.ChangeRequest, dns var err error var rrs *route53.ResourceRecordSet + var policy *dns.RoutingPolicy + if req.Addition != nil { + policy = req.Addition.RoutingPolicy + } else if req.Deletion != nil { + policy = req.Deletion.RoutingPolicy + } if rset.Type == dns.RS_ALIAS { - rrs, err = buildResourceRecordSetForAliasTarget(name, rset) + rrs, err = buildResourceRecordSetForAliasTarget(name, policy, rset) } else { - rrs, err = buildResourceRecordSet(name, rset) + rrs, err = buildResourceRecordSet(name, policy, rset) } if err != nil { this.Errorf("addChange failed for %s[%s]: %s", name, this.zone.Id(), err) @@ -176,8 +182,8 @@ func (this *Execution) submitChanges(metrics provider.Metrics) error { return nil } -var patternNotFound = regexp.MustCompile("Tried to delete resource record set \\[name='([^']+)', type='([^']+)'\\] but it was not found") -var patternExists = regexp.MustCompile("Tried to create resource record set \\[name='([^']+)', type='([^']+)'\\] but it already exists") +var patternNotFound = regexp.MustCompile("Tried to delete resource record set \\[name='([^']+)', type='([^']+)'] but it was not found") +var patternExists = regexp.MustCompile("Tried to create resource record set \\[name='([^']+)', type='([^']+)'] but it already exists") func (this *Execution) tryFixChanges(message string, changes []*Change) (succeeded []*Change, failed []*Change, err error) { submatchNotFound := patternNotFound.FindAllStringSubmatch(message, -1) diff --git a/pkg/controller/provider/aws/handler.go b/pkg/controller/provider/aws/handler.go index 4c8d660d3..bd8eb8ea0 100644 --- a/pkg/controller/provider/aws/handler.go +++ b/pkg/controller/provider/aws/handler.go @@ -196,7 +196,6 @@ func (h *Handler) getZones(cache provider.ZoneCache) (provider.DNSHostedZones, e func buildRecordSet(r *route53.ResourceRecordSet) *dns.RecordSet { rs := dns.NewRecordSet(aws.StringValue(r.Type), aws.Int64Value(r.TTL), nil) - rs.RoutingPolicy = extractRoutingPolicy(r) for _, rr := range r.ResourceRecords { rs.Add(&dns.Record{Value: aws.StringValue(rr.Value)}) } @@ -218,7 +217,9 @@ func (h *Handler) getZoneState(zone provider.DNSHostedZone, cache provider.ZoneC } else { rs = buildRecordSet(r) } - dnssets.AddRecordSetFromProviderEx(dns.DNSSetName{DNSName: aws.StringValue(r.Name), SetIdentifier: aws.StringValue(r.SetIdentifier)}, rs) + name := dns.DNSSetName{DNSName: aws.StringValue(r.Name), SetIdentifier: aws.StringValue(r.SetIdentifier)} + policy := extractRoutingPolicy(r) + dnssets.AddRecordSetFromProviderEx(name, policy, rs) } } forwarded, err := h.handleRecordSets(zone, aggr) @@ -388,11 +389,11 @@ func (h *Handler) DeleteVPCAssociationAuthorization(hostedZoneId string, vpcId s return out, nil } -func (h *Handler) GetRecordSet(zone provider.DNSHostedZone, rsName dns.DNSSetName, recordType string) (provider.DedicatedRecordSet, error) { - name := 
rsName.Align() +func (h *Handler) GetRecordSet(zone provider.DNSHostedZone, setName dns.DNSSetName, recordType string) (provider.DedicatedRecordSet, error) { + name := setName.Align() var recordIdentifier *string - if rsName.SetIdentifier != "" { - recordIdentifier = &rsName.SetIdentifier + if setName.SetIdentifier != "" { + recordIdentifier = &setName.SetIdentifier } sets, err := h.r53.ListResourceRecordSets(&route53.ListResourceRecordSetsInput{ HostedZoneId: aws.String(zone.Id().ID), @@ -414,8 +415,9 @@ func (h *Handler) GetRecordSet(zone provider.DNSHostedZone, rsName dns.DNSSetNam } else { rs = buildRecordSet(r) } - rsName := dns.DNSSetName{DNSName: aws.StringValue(r.Name), SetIdentifier: aws.StringValue(r.SetIdentifier)} - dnssets.AddRecordSetFromProviderEx(rsName, rs) + routingPolicy := extractRoutingPolicy(r) + dnsSetName := dns.DNSSetName{DNSName: aws.StringValue(r.Name), SetIdentifier: aws.StringValue(r.SetIdentifier)} + dnssets.AddRecordSetFromProviderEx(dnsSetName, routingPolicy, rs) } } for _, r := range sets.ResourceRecordSets { @@ -423,8 +425,8 @@ func (h *Handler) GetRecordSet(zone provider.DNSHostedZone, rsName dns.DNSSetNam aggr(r) } } - if set := dnssets[rsName]; set != nil { - return provider.FromDedicatedRecordSet(rsName, set.Sets[recordType]), nil + if set := dnssets[setName]; set != nil { + return provider.FromDedicatedRecordSet(setName, set.Sets[recordType]), nil } return nil, nil } @@ -440,7 +442,7 @@ func (h *Handler) DeleteRecordSet(logger logger.LogContext, zone provider.DNSHos func (h *Handler) executeRecordSetChange(action string, logger logger.LogContext, zone provider.DNSHostedZone, rawrs provider.DedicatedRecordSet) error { exec := NewExecution(logger, h, zone) dnsName, rs := provider.ToDedicatedRecordset(rawrs) - dnsset := dns.NewDNSSet(dnsName) + dnsset := dns.NewDNSSet(dnsName, nil) dnsset.Sets[rs.Type] = rs if err := exec.addChange(action, &provider.ChangeRequest{Type: rs.Type}, dnsset); err != nil { return err diff --git a/pkg/controller/provider/azure-private/execution.go b/pkg/controller/provider/azure-private/execution.go index b3a21454f..2f301231c 100644 --- a/pkg/controller/provider/azure-private/execution.go +++ b/pkg/controller/provider/azure-private/execution.go @@ -65,8 +65,12 @@ func (exec *Execution) buildRecordSet(req *provider.ChangeRequest) (buildStatus, dnsset = req.Deletion } - name, rset := dns.MapToProvider(req.Type, dnsset, exec.zoneName) - name, ok := utils.DropZoneName(name, exec.zoneName) + if dnsset.RoutingPolicy != nil { + return bs_invalidRoutingPolicy, "", nil + } + + setName, rset := dns.MapToProvider(req.Type, dnsset, exec.zoneName) + name, ok := utils.DropZoneName(setName.DNSName, exec.zoneName) if !ok { return bs_invalidName, "", &azure.RecordSet{Name: &name} } @@ -75,10 +79,6 @@ func (exec *Execution) buildRecordSet(req *provider.ChangeRequest) (buildStatus, return bs_empty, "", nil } - if req.RoutingPolicy != nil { - return bs_invalidRoutingPolicy, "", nil - } - exec.Infof("Desired %s: %s record set %s[%s] with TTL %d: %s", req.Action, rset.Type, name, exec.zoneName, rset.TTL, rset.RecordString()) return exec.buildMappedRecordSet(name, rset) } diff --git a/pkg/controller/provider/azure/execution.go b/pkg/controller/provider/azure/execution.go index 0f682585e..703601143 100644 --- a/pkg/controller/provider/azure/execution.go +++ b/pkg/controller/provider/azure/execution.go @@ -65,8 +65,12 @@ func (exec *Execution) buildRecordSet(req *provider.ChangeRequest) (buildStatus, dnsset = req.Deletion } - name, rset := 
dns.MapToProvider(req.Type, dnsset, exec.zoneName) - name, ok := utils.DropZoneName(name, exec.zoneName) + if dnsset.RoutingPolicy != nil { + return bs_invalidRoutingPolicy, "", nil + } + + setName, rset := dns.MapToProvider(req.Type, dnsset, exec.zoneName) + name, ok := utils.DropZoneName(setName.DNSName, exec.zoneName) if !ok { return bs_invalidName, "", &azure.RecordSet{Name: &name} } @@ -75,10 +79,6 @@ func (exec *Execution) buildRecordSet(req *provider.ChangeRequest) (buildStatus, return bs_empty, "", nil } - if req.RoutingPolicy != nil { - return bs_invalidRoutingPolicy, "", nil - } - exec.Infof("Desired %s: %s record set %s[%s] with TTL %d: %s", req.Action, rset.Type, name, exec.zoneName, rset.TTL, rset.RecordString()) return exec.buildMappedRecordSet(name, rset) } diff --git a/pkg/controller/provider/google/execution.go b/pkg/controller/provider/google/execution.go index 62327c356..2e37526da 100644 --- a/pkg/controller/provider/google/execution.go +++ b/pkg/controller/provider/google/execution.go @@ -55,25 +55,26 @@ func NewExecution(logger logger.LogContext, h *Handler, zone provider.DNSHostedZ } func (this *Execution) addChange(req *provider.ChangeRequest) { - var name string + var setName dns.DNSSetName var newset, oldset *dns.RecordSet if req.Addition != nil { - name, newset = dns.MapToProvider(req.Type, req.Addition, this.zone.Domain()) + setName, newset = dns.MapToProvider(req.Type, req.Addition, this.zone.Domain()) + if req.Addition.RoutingPolicy != nil { + err := fmt.Errorf("Routing policies unsupported for " + TYPE_CODE) + if req.Done != nil { + req.Done.SetInvalid(err) + } + return + } } if req.Deletion != nil { - name, oldset = dns.MapToProvider(req.Type, req.Deletion, this.zone.Domain()) + setName, oldset = dns.MapToProvider(req.Type, req.Deletion, this.zone.Domain()) } + name := setName.DNSName if name == "" || (newset.Length() == 0 && oldset.Length() == 0) { return } - if req.RoutingPolicy != nil { - err := fmt.Errorf("Routing policies unsupported for " + TYPE_CODE) - if req.Done != nil { - req.Done.SetInvalid(err) - } - return - } name = dns.AlignHostname(name) switch req.Action { case provider.R_CREATE: diff --git a/pkg/controller/provider/openstack/execution.go b/pkg/controller/provider/openstack/execution.go index cc4b197d5..6ffb32ad2 100644 --- a/pkg/controller/provider/openstack/execution.go +++ b/pkg/controller/provider/openstack/execution.go @@ -61,18 +61,18 @@ func (exec *Execution) buildRecordSet(req *provider.ChangeRequest) (buildStatus, dnsset = req.Deletion } + if dnsset.RoutingPolicy != nil { + return bsInvalidRoutingPolicy, nil + } + name, rset := dns.MapToProvider(req.Type, dnsset, exec.zone.Domain()) if len(rset.Records) == 0 { return bsEmpty, nil } - if req.RoutingPolicy != nil { - return bsInvalidRoutingPolicy, nil - } - exec.Infof("Desired %s: %s record set %s[%s]: %s", req.Action, rset.Type, name, exec.zone.Domain(), rset.RecordString()) - return exec.buildMappedRecordSet(name, rset) + return exec.buildMappedRecordSet(name.DNSName, rset) } func (exec *Execution) buildMappedRecordSet(name string, rset *dns.RecordSet) (buildStatus, *recordsets.RecordSet) { diff --git a/pkg/controller/provider/remote/handler.go b/pkg/controller/provider/remote/handler.go index 3194c73a5..73869e212 100644 --- a/pkg/controller/provider/remote/handler.go +++ b/pkg/controller/provider/remote/handler.go @@ -269,7 +269,8 @@ func (h *Handler) executeRequests(logger logger.LogContext, zone provider.DNSHos var changeRequests []*common.ChangeRequest for _, req := range reqs { 
- if req.RoutingPolicy != nil && h.serverProtocolVersion != common.ProtocolVersion1 { + if h.serverProtocolVersion != common.ProtocolVersion1 && + (req.Addition != nil && req.Addition.RoutingPolicy != nil || req.Deletion != nil && req.Deletion.RoutingPolicy != nil) { err := fmt.Errorf("routing policy not supported by remote server version") logger.Warnf("%s", err) if req.Done != nil { diff --git a/pkg/controller/source/ingress/handler.go b/pkg/controller/source/ingress/handler.go index f5e2d4fd7..c65ab98f2 100644 --- a/pkg/controller/source/ingress/handler.go +++ b/pkg/controller/source/ingress/handler.go @@ -56,7 +56,7 @@ func (this *IngressSource) GetDNSInfo(logger logger.LogContext, obj resources.Ob if len(del) > 0 { return info, fmt.Errorf("annotated dns names %s not declared by ingress", del) } - info.Names = dns.NewDNSNameSetFromStringSet(names, current.SetIdentifier()) + info.Names = dns.NewDNSNameSetFromStringSet(names, current.GetSetIdentifier()) return info, nil } diff --git a/pkg/dns/dnsset.go b/pkg/dns/dnsset.go index ff9886bd9..e13dca594 100644 --- a/pkg/dns/dnsset.go +++ b/pkg/dns/dnsset.go @@ -61,20 +61,20 @@ type Ownership interface { } func (dnssets DNSSets) AddRecordSetFromProvider(dnsName string, rs *RecordSet) { - dnssets.AddRecordSetFromProviderEx(DNSSetName{DNSName: dnsName}, rs) + dnssets.AddRecordSetFromProviderEx(DNSSetName{DNSName: dnsName}, nil, rs) } -func (dnssets DNSSets) AddRecordSetFromProviderEx(rsName DNSSetName, rs *RecordSet) { - name := rsName.Normalize() +func (dnssets DNSSets) AddRecordSetFromProviderEx(setName DNSSetName, policy *RoutingPolicy, rs *RecordSet) { + name := setName.Normalize() name, rs = MapFromProvider(name, rs) - dnssets.AddRecordSet(name, rs) + dnssets.AddRecordSet(name, policy, rs) } -func (dnssets DNSSets) AddRecordSet(name DNSSetName, rs *RecordSet) { +func (dnssets DNSSets) AddRecordSet(name DNSSetName, policy *RoutingPolicy, rs *RecordSet) { dnsset := dnssets[name] if dnsset == nil { - dnsset = NewDNSSet(name) + dnsset = NewDNSSet(name, policy) dnssets[name] = dnsset } dnsset.Sets[rs.Type] = rs @@ -121,14 +121,16 @@ const ( ) type DNSSet struct { - Name DNSSetName - Kind string - UpdateGroup string - Sets RecordSets + Name DNSSetName + Kind string + UpdateGroup string + Sets RecordSets + RoutingPolicy *RoutingPolicy } func (this *DNSSet) Clone() *DNSSet { - return &DNSSet{Name: this.Name, Sets: this.Sets.Clone(), UpdateGroup: this.UpdateGroup, Kind: this.Kind} + return &DNSSet{Name: this.Name, Sets: this.Sets.Clone(), UpdateGroup: this.UpdateGroup, Kind: this.Kind, + RoutingPolicy: this.RoutingPolicy.Clone()} } func (this *DNSSet) getAttr(ty string, name string) string { @@ -139,10 +141,10 @@ func (this *DNSSet) getAttr(ty string, name string) string { return "" } -func (this *DNSSet) setAttr(ty string, name string, value string, policy *RoutingPolicy) { +func (this *DNSSet) setAttr(ty string, name string, value string) { rset := this.Sets[ty] if rset == nil { - rset = newAttrRecordSet(ty, name, value, policy) + rset = newAttrRecordSet(ty, name, value) this.Sets[rset.Type] = rset } else { rset.SetAttr(name, value) @@ -160,8 +162,8 @@ func (this *DNSSet) GetTxtAttr(name string) string { return this.getAttr(RS_TXT, name) } -func (this *DNSSet) SetTxtAttr(name string, value string, policy *RoutingPolicy) { - this.setAttr(RS_TXT, name, value, policy) +func (this *DNSSet) SetTxtAttr(name string, value string) { + this.setAttr(RS_TXT, name, value) } func (this *DNSSet) DeleteTxtAttr(name string) { @@ -172,8 +174,8 @@ func (this 
*DNSSet) GetMetaAttr(name string) string { return this.getAttr(RS_META, name) } -func (this *DNSSet) SetMetaAttr(name string, value string, policy *RoutingPolicy) { - this.setAttr(RS_META, name, value, policy) +func (this *DNSSet) SetMetaAttr(name string, value string) { + this.setAttr(RS_META, name, value) } func (this *DNSSet) DeleteMetaAttr(name string) { @@ -194,8 +196,8 @@ func (this *DNSSet) GetOwner() string { return this.GetMetaAttr(ATTR_OWNER) } -func (this *DNSSet) SetOwner(ownerid string, policy *RoutingPolicy) *DNSSet { - this.SetMetaAttr(ATTR_OWNER, ownerid, policy) +func (this *DNSSet) SetOwner(ownerid string) *DNSSet { + this.SetMetaAttr(ATTR_OWNER, ownerid) return this } @@ -213,7 +215,7 @@ func (this *DNSSet) SetKind(t string, prop ...bool) *DNSSet { this.Kind = t if t != api.DNSEntryKind { if len(prop) == 0 || prop[0] { - this.SetMetaAttr(ATTR_KIND, t, nil) + this.SetMetaAttr(ATTR_KIND, t) } } else { this.DeleteMetaAttr(ATTR_KIND) @@ -221,14 +223,14 @@ func (this *DNSSet) SetKind(t string, prop ...bool) *DNSSet { return this } -func (this *DNSSet) SetRecordSet(rtype string, ttl int64, routingPolicy *RoutingPolicy, values ...string) { +func (this *DNSSet) SetRecordSet(rtype string, ttl int64, values ...string) { records := make([]*Record, len(values)) for i, r := range values { records[i] = &Record{Value: r} } - this.Sets[rtype] = &RecordSet{Type: rtype, TTL: ttl, IgnoreTTL: false, RoutingPolicy: routingPolicy, Records: records} + this.Sets[rtype] = &RecordSet{Type: rtype, TTL: ttl, IgnoreTTL: false, Records: records} } -func NewDNSSet(name DNSSetName) *DNSSet { - return &DNSSet{Name: name, Sets: map[string]*RecordSet{}} +func NewDNSSet(name DNSSetName, routingPolicy *RoutingPolicy) *DNSSet { + return &DNSSet{Name: name, RoutingPolicy: routingPolicy, Sets: map[string]*RecordSet{}} } diff --git a/pkg/dns/mapping.go b/pkg/dns/mapping.go index a7b15f27e..d7fef61a0 100644 --- a/pkg/dns/mapping.go +++ b/pkg/dns/mapping.go @@ -43,19 +43,14 @@ func NormalizeHostname(host string) string { return host } -func MapToProvider(rtype string, dnsset *DNSSet, base string) (string, *RecordSet) { - rsName, rs := MapToProviderEx(rtype, dnsset, base, nil) - return rsName.DNSName, rs -} - -func MapToProviderEx(rtype string, dnsset *DNSSet, base string, policy *RoutingPolicy) (DNSSetName, *RecordSet) { +func MapToProvider(rtype string, dnsset *DNSSet, base string) (DNSSetName, *RecordSet) { dnsName := dnsset.Name.DNSName rs := dnsset.Sets[rtype] if rtype == RS_META { prefix := dnsset.GetMetaAttr(ATTR_PREFIX) if prefix == "" { prefix = TxtPrefix - dnsset.SetMetaAttr(ATTR_PREFIX, prefix, policy) + dnsset.SetMetaAttr(ATTR_PREFIX, prefix) } metaName := calcMetaRecordDomainName(dnsName, prefix, base) new := *dnsset.Sets[rtype] diff --git a/pkg/dns/mapping_test.go b/pkg/dns/mapping_test.go index 5717cde56..9e30ca7b7 100644 --- a/pkg/dns/mapping_test.go +++ b/pkg/dns/mapping_test.go @@ -80,7 +80,7 @@ func TestMapToFromProvider(t *testing.T) { Sets: RecordSets{RS_META: &RecordSet{Type: RS_META, TTL: 600, Records: inputRecords}}, } - actualName, actualRecordSet := MapToProviderEx(rtype, &dnsset, base, nil) + actualName, actualRecordSet := MapToProvider(rtype, &dnsset, base) Ω(actualName).Should(Equal(DNSSetName{DNSName: entry.wantedName}), "Name should match") Ω(actualRecordSet.Type).Should(Equal(RS_TXT), "Type mismatch") diff --git a/pkg/dns/provider/changemodel.go b/pkg/dns/provider/changemodel.go index 7e67847e4..cee2adcdc 100644 --- a/pkg/dns/provider/changemodel.go +++ 
b/pkg/dns/provider/changemodel.go @@ -18,6 +18,7 @@ package provider import ( "fmt" + "reflect" "sort" "strings" "time" @@ -44,17 +45,16 @@ const ( type ChangeRequests []*ChangeRequest type ChangeRequest struct { - Action string - Type string - Addition *dns.DNSSet - Deletion *dns.DNSSet - Done DoneHandler - Applied bool - RoutingPolicy *dns.RoutingPolicy + Action string + Type string + Addition *dns.DNSSet + Deletion *dns.DNSSet + Done DoneHandler + Applied bool } -func NewChangeRequest(action string, rtype string, del, add *dns.DNSSet, done DoneHandler, policy *dns.RoutingPolicy) *ChangeRequest { - r := &ChangeRequest{Action: action, Type: rtype, Addition: add, Deletion: del, RoutingPolicy: policy} +func NewChangeRequest(action string, rtype string, del, add *dns.DNSSet, done DoneHandler) *ChangeRequest { + r := &ChangeRequest{Action: action, Type: rtype, Addition: add, Deletion: del} r.Done = &applyingDoneHandler{changeRequest: r, inner: done} return r } @@ -133,7 +133,7 @@ func (this *ChangeGroup) cleanup(logger logger.LogContext, model *ChangeModel) b model.Infof("found unapplied managed set '%s'", s.Name) var done DoneHandler for _, e := range model.context.entries { - if e.rsname == s.Name { + if e.dnsSetName == s.Name { done = NewStatusUpdate(logger, e, model.context.fhandler) break } @@ -166,17 +166,17 @@ func (this *ChangeGroup) update(logger logger.LogContext, model *ChangeModel) bo return ok } -func (this *ChangeGroup) addCreateRequest(dnsset *dns.DNSSet, rtype string, done DoneHandler, policy *dns.RoutingPolicy) { - this.addChangeRequest(R_CREATE, nil, dnsset, rtype, done, policy) +func (this *ChangeGroup) addCreateRequest(dnsset *dns.DNSSet, rtype string, done DoneHandler) { + this.addChangeRequest(R_CREATE, nil, dnsset, rtype, done) } -func (this *ChangeGroup) addUpdateRequest(old, new *dns.DNSSet, rtype string, done DoneHandler, policy *dns.RoutingPolicy) { - this.addChangeRequest(R_UPDATE, old, new, rtype, done, policy) +func (this *ChangeGroup) addUpdateRequest(old, new *dns.DNSSet, rtype string, done DoneHandler) { + this.addChangeRequest(R_UPDATE, old, new, rtype, done) } func (this *ChangeGroup) addDeleteRequest(dnsset *dns.DNSSet, rtype string, done DoneHandler) { - this.addChangeRequest(R_DELETE, dnsset, nil, rtype, done, nil) + this.addChangeRequest(R_DELETE, dnsset, nil, rtype, done) } -func (this *ChangeGroup) addChangeRequest(action string, old, new *dns.DNSSet, rtype string, done DoneHandler, policy *dns.RoutingPolicy) { - r := NewChangeRequest(action, rtype, old, new, done, policy) +func (this *ChangeGroup) addChangeRequest(action string, old, new *dns.DNSSet, rtype string, done DoneHandler) { + r := NewChangeRequest(action, rtype, old, new, done) this.requests = append(this.requests, r) } @@ -274,17 +274,17 @@ func (this *ChangeModel) Setup() error { sets := this.zonestate.GetDNSSets() this.context.zone.SetOwners(sets.GetOwners()) this.dangling = newChangeGroup("dangling entries", provider, this) - for rsName, set := range sets { + for setName, set := range sets { var view *ChangeGroup - provider = this.context.providers.LookupFor(rsName.DNSName) + provider = this.context.providers.LookupFor(setName.DNSName) if provider != nil { - this.dumpf(" %s: %d types (provider %s)", rsName, len(set.Sets), provider.ObjectName()) + this.dumpf(" %s: %d types (provider %s)", setName, len(set.Sets), provider.ObjectName()) view = this.getProviderView(provider) } else { - this.dumpf(" %s: %d types (no provider)", rsName, len(set.Sets)) + this.dumpf(" %s: %d types (no 
provider)", setName, len(set.Sets)) view = this.dangling } - view.dnssets[rsName] = set + view.dnssets[setName] = set for t, r := range set.Sets { this.dumpf(" %s: %d records: %s", t, len(r.Records), r.RecordString()) } @@ -302,8 +302,8 @@ func (this *ChangeModel) Apply(name dns.DNSSetName, updateGroup string, createdA func (this *ChangeModel) Delete(name dns.DNSSetName, updateGroup string, createdAt time.Time, done DoneHandler, spec TargetSpec) ChangeResult { return this.Exec(true, true, name, updateGroup, createdAt, done, spec) } -func (this *ChangeModel) PseudoApply(name dns.DNSSetName) { - this.applied[name] = dns.NewDNSSet(name) +func (this *ChangeModel) PseudoApply(name dns.DNSSetName, spec TargetSpec) { + this.applied[name] = dns.NewDNSSet(name, spec.RoutingPolicy()) } func (this *ChangeModel) Exec(apply bool, delete bool, name dns.DNSSetName, updateGroup string, createdAt time.Time, done DoneHandler, spec TargetSpec) ChangeResult { @@ -331,7 +331,7 @@ func (this *ChangeModel) Exec(apply bool, delete bool, name dns.DNSSetName, upda view := this.getProviderView(p) oldset := view.dnssets[name] - newset := dns.NewDNSSet(name) + newset := dns.NewDNSSet(name, spec.RoutingPolicy()) newset.UpdateGroup = updateGroup newset.SetKind(spec.Kind()) if !delete { @@ -365,16 +365,16 @@ func (this *ChangeModel) Exec(apply bool, delete bool, name dns.DNSSetName, upda curset := oldset.Sets[ty] if curset == nil { if apply { - view.addCreateRequest(newset, ty, done, spec.RoutingPolicy()) + view.addCreateRequest(newset, ty, done) } mod = true } else { - olddns, _ := dns.MapToProviderEx(ty, oldset, this.Domain(), spec.RoutingPolicy()) - newdns, _ := dns.MapToProviderEx(ty, newset, this.Domain(), spec.RoutingPolicy()) + olddns, _ := dns.MapToProvider(ty, oldset, this.Domain()) + newdns, _ := dns.MapToProvider(ty, newset, this.Domain()) if olddns == newdns { - if !curset.Match(rset) { + if !curset.Match(rset) || !reflect.DeepEqual(spec.RoutingPolicy(), oldset.RoutingPolicy) { if apply { - view.addUpdateRequest(oldset, newset, ty, done, spec.RoutingPolicy()) + view.addUpdateRequest(oldset, newset, ty, done) } mod = true } else { @@ -384,7 +384,7 @@ func (this *ChangeModel) Exec(apply bool, delete bool, name dns.DNSSetName, upda } } else { if apply { - view.addCreateRequest(newset, ty, done, spec.RoutingPolicy()) + view.addCreateRequest(newset, ty, done) view.addDeleteRequest(oldset, ty, this.wrappedDoneHandler(name, nil)) } mod = true @@ -404,9 +404,9 @@ func (this *ChangeModel) Exec(apply bool, delete bool, name dns.DNSSetName, upda if !delete { if apply { this.Infof("no existing entry found for %s", name) - this.setOwner(newset, spec.OwnerId(), spec.RoutingPolicy()) + this.setOwner(newset, spec.OwnerId()) for ty := range newset.Sets { - view.addCreateRequest(newset, ty, done, spec.RoutingPolicy()) + view.addCreateRequest(newset, ty, done) } } mod = true @@ -449,11 +449,11 @@ func (this *ChangeModel) IsFailed(name dns.DNSSetName) bool { return this.failedDNSNames.Contains(name) } -func (this *ChangeModel) wrappedDoneHandler(rsName dns.DNSSetName, done DoneHandler) DoneHandler { +func (this *ChangeModel) wrappedDoneHandler(name dns.DNSSetName, done DoneHandler) DoneHandler { return &changeModelDoneHandler{ changeModel: this, inner: done, - rsName: rsName, + dnsSetName: name, } } @@ -463,7 +463,7 @@ func (this *ChangeModel) wrappedDoneHandler(rsName dns.DNSSetName, done DoneHand type changeModelDoneHandler struct { changeModel *ChangeModel inner DoneHandler - rsName dns.DNSSetName + dnsSetName dns.DNSSetName 
} func (this *changeModelDoneHandler) SetInvalid(err error) { @@ -473,7 +473,7 @@ func (this *changeModelDoneHandler) SetInvalid(err error) { } func (this *changeModelDoneHandler) Failed(err error) { - this.changeModel.failedDNSNames.Add(this.rsName) + this.changeModel.failedDNSNames.Add(this.dnsSetName) if this.inner != nil { this.inner.Failed(err) } @@ -502,12 +502,12 @@ func (this *ChangeModel) IsForeign(set *dns.DNSSet) bool { return set.IsForeign(this.ownership) } -func (this *ChangeModel) setOwner(set *dns.DNSSet, id string, policy *dns.RoutingPolicy) bool { +func (this *ChangeModel) setOwner(set *dns.DNSSet, id string) bool { if id == "" { id = this.config.Ident } if id != "" { - set.SetOwner(id, policy) + set.SetOwner(id) return true } return false @@ -516,8 +516,8 @@ func (this *ChangeModel) setOwner(set *dns.DNSSet, id string, policy *dns.Routin func (this *ChangeModel) ApplySpec(set *dns.DNSSet, base *dns.DNSSet, provider DNSProvider, spec TargetSpec) *dns.DNSSet { set.SetKind(spec.Kind()) if base == nil || !this.IsForeign(base) { - if this.setOwner(set, spec.OwnerId(), spec.RoutingPolicy()) { - set.SetMetaAttr(dns.ATTR_PREFIX, dns.TxtPrefix, spec.RoutingPolicy()) + if this.setOwner(set, spec.OwnerId()) { + set.SetMetaAttr(dns.ATTR_PREFIX, dns.TxtPrefix) } } @@ -531,10 +531,10 @@ func (this *ChangeModel) ApplySpec(set *dns.DNSSet, base *dns.DNSSet, provider D ipv4addrs, ipv6addrs, err := lookupHosts(t.GetHostName()) if err == nil { for _, addr := range ipv4addrs { - AddRecord(targetsets, dns.RS_A, addr, ttl, spec.RoutingPolicy()) + AddRecord(targetsets, dns.RS_A, addr, ttl) } for _, addr := range ipv6addrs { - AddRecord(targetsets, dns.RS_AAAA, addr, ttl, spec.RoutingPolicy()) + AddRecord(targetsets, dns.RS_AAAA, addr, ttl) } } else { this.Errorf("cannot lookup '%s': %s", t.GetHostName(), err) @@ -543,21 +543,21 @@ func (this *ChangeModel) ApplySpec(set *dns.DNSSet, base *dns.DNSSet, provider D t.GetHostName(), strings.Join(ipv4addrs, ","), strings.Join(ipv6addrs, ",")) } else { t = provider.MapTarget(t) - AddRecord(targetsets, t.GetRecordType(), t.GetHostName(), ttl, spec.RoutingPolicy()) + AddRecord(targetsets, t.GetRecordType(), t.GetHostName(), ttl) } } set.Sets = targetsets if len(cnames) > 0 && this.Owns(set) { sort.Strings(cnames) - set.SetMetaAttr(dns.ATTR_CNAMES, strings.Join(cnames, ","), spec.RoutingPolicy()) + set.SetMetaAttr(dns.ATTR_CNAMES, strings.Join(cnames, ",")) } return set } -func AddRecord(targetsets dns.RecordSets, ty string, host string, ttl int64, policy *dns.RoutingPolicy) { +func AddRecord(targetsets dns.RecordSets, ty string, host string, ttl int64) { rs := targetsets[ty] if rs == nil { - rs = dns.NewRecordSetEx(ty, ttl, policy, nil) + rs = dns.NewRecordSet(ty, ttl, nil) targetsets[ty] = rs } rs.Records = append(rs.Records, &dns.Record{Value: host}) diff --git a/pkg/dns/provider/dedicatedrecord.go b/pkg/dns/provider/dedicatedrecord.go index 5871ec896..c51ec9baf 100644 --- a/pkg/dns/provider/dedicatedrecord.go +++ b/pkg/dns/provider/dedicatedrecord.go @@ -26,7 +26,7 @@ import ( ) type DedicatedDNSAccess interface { - GetRecordSet(zone DNSHostedZone, rsName dns.DNSSetName, recordType string) (DedicatedRecordSet, error) + GetRecordSet(zone DNSHostedZone, name dns.DNSSetName, recordType string) (DedicatedRecordSet, error) CreateOrUpdateRecordSet(logger logger.LogContext, zone DNSHostedZone, old, new DedicatedRecordSet) error DeleteRecordSet(logger logger.LogContext, zone DNSHostedZone, rs DedicatedRecordSet) error } diff --git a/pkg/dns/provider/entry.go 
b/pkg/dns/provider/entry.go index 1465c71bc..3abff0636 100644 --- a/pkg/dns/provider/entry.go +++ b/pkg/dns/provider/entry.go @@ -81,7 +81,7 @@ func (this *EntryPremise) NotifyChange(p *EntryPremise) string { type EntryVersion struct { object dnsutils.DNSSpecification providername resources.ObjectName - rsname dns.DNSSetName + dnsSetName dns.DNSSetName targets Targets routingPolicy *dns.RoutingPolicy mappings map[string][]string @@ -98,10 +98,10 @@ type EntryVersion struct { func NewEntryVersion(object dnsutils.DNSSpecification, old *Entry) *EntryVersion { v := &EntryVersion{ - object: object, - rsname: dns.DNSSetName{DNSName: object.GetDNSName(), SetIdentifier: object.GetSetIdentifier()}, - targets: Targets{}, - mappings: map[string][]string{}, + object: object, + dnsSetName: dns.DNSSetName{DNSName: object.GetDNSName(), SetIdentifier: object.GetSetIdentifier()}, + targets: Targets{}, + mappings: map[string][]string{}, } if old != nil { v.status = old.status @@ -116,7 +116,7 @@ func (this *EntryVersion) Kind() string { } func (this *EntryVersion) RequiresUpdateFor(e *EntryVersion) (reasons []string, refresh bool) { - if this.rsname != e.rsname { + if this.dnsSetName != e.dnsSetName { reasons = append(reasons, "recordset name changed") } if !utils.Int64Equal(this.status.TTL, e.status.TTL) { @@ -194,19 +194,19 @@ func (this *EntryVersion) ObjectName() resources.ObjectName { } func (this *EntryVersion) DNSName() string { - return this.rsname.DNSName + return this.dnsSetName.DNSName } -func (this *EntryVersion) SetIdentifier() string { - return this.rsname.SetIdentifier +func (this *EntryVersion) GetSetIdentifier() string { + return this.dnsSetName.SetIdentifier } func (this *EntryVersion) DNSSetName() dns.DNSSetName { - return this.rsname + return this.dnsSetName } func (this *EntryVersion) ZonedDNSName() ZonedDNSSetName { - return ZonedDNSSetName{ZoneID: this.ZoneId(), DNSSetName: this.rsname} + return ZonedDNSSetName{ZoneID: this.ZoneId(), DNSSetName: this.dnsSetName} } func (this *EntryVersion) Targets() Targets { @@ -375,17 +375,17 @@ func validate(logger logger.LogContext, state *state, entry *EntryVersion, p *En return } - if p.zonedomain == entry.rsname.DNSName { + if p.zonedomain == entry.dnsSetName.DNSName { err = fmt.Errorf("usage of dns name (%s) identical to domain of hosted zone (%s) is not supported", p.zonedomain, p.zoneid) return } if len(effspec.GetTargets()) > 0 && len(effspec.GetText()) > 0 { - err = fmt.Errorf("only Text or Targets possible: %s", err) + err = fmt.Errorf("only Text or Targets possible") return } if ttl := effspec.GetTTL(); ttl != nil && (*ttl == 0 || *ttl < 0) { - err = fmt.Errorf("TTL must be greater than zero: %s", err) + err = fmt.Errorf("TTL must be greater than zero") return } @@ -605,7 +605,7 @@ func (this *EntryVersion) Setup(logger logger.LogContext, state *state, p *Entry if p.zoneid == "" { this.status.State = api.STATE_ERROR this.status.Provider = nil - this.status.Message = StatusMessagef("no provider found for %q", this.rsname) + this.status.Message = StatusMessagef("no provider found for %q", this.dnsSetName) } else { if p.provider.IsValid() { this.valid = true diff --git a/pkg/dns/provider/inmemory.go b/pkg/dns/provider/inmemory.go index 030f14990..189951d50 100644 --- a/pkg/dns/provider/inmemory.go +++ b/pkg/dns/provider/inmemory.go @@ -109,10 +109,9 @@ func (m *InMemory) Apply(zoneID dns.ZoneID, request *ChangeRequest, metrics Metr } name, rset := buildRecordSet(request) - switch request.Action { case R_CREATE, R_UPDATE: - 
data.dnssets.AddRecordSet(name, rset) + data.dnssets.AddRecordSet(name, request.Addition.RoutingPolicy, rset) metrics.AddZoneRequests(zoneID.ID, M_UPDATERECORDS, 1) case R_DELETE: data.dnssets.RemoveRecordSet(name, rset.Type) diff --git a/pkg/dns/provider/raw/execution.go b/pkg/dns/provider/raw/execution.go index 46a0c497e..19035b282 100644 --- a/pkg/dns/provider/raw/execution.go +++ b/pkg/dns/provider/raw/execution.go @@ -72,15 +72,15 @@ func (this *Execution) AddChange(req *provider.ChangeRequest) { var newset, oldset *dns.RecordSet if req.Addition != nil { - name, newset = dns.MapToProviderEx(req.Type, req.Addition, this.domain, nil) + name, newset = dns.MapToProvider(req.Type, req.Addition, this.domain) } if req.Deletion != nil { - name, oldset = dns.MapToProviderEx(req.Type, req.Deletion, this.domain, nil) + name, oldset = dns.MapToProvider(req.Type, req.Deletion, this.domain) } if name.DNSName == "" || (newset.Length() == 0 && oldset.Length() == 0) { return } - if name.SetIdentifier != "" || req.RoutingPolicy != nil { + if name.SetIdentifier != "" || (req.Addition != nil && req.Addition.RoutingPolicy != nil) || (req.Deletion != nil && req.Deletion.RoutingPolicy != nil) { err := fmt.Errorf("routing policy not supported") this.Warnf("record set %s[%s]: %s", name, this.zone.Id(), err) if req.Done != nil { diff --git a/pkg/dns/provider/raw/records.go b/pkg/dns/provider/raw/records.go index 648905eb7..529562985 100644 --- a/pkg/dns/provider/raw/records.go +++ b/pkg/dns/provider/raw/records.go @@ -114,7 +114,7 @@ func (this *ZoneState) CalculateDNSSets() { rs.TTL = int64(r.GetTTL()) rs.Add(&dns.Record{Value: r.GetValue()}) } - this.dnssets.AddRecordSetFromProviderEx(dnsname, rs) + this.dnssets.AddRecordSetFromProviderEx(dnsname, nil, rs) } } } diff --git a/pkg/dns/provider/state_zone.go b/pkg/dns/provider/state_zone.go index 3e59928e3..b8543f1f6 100644 --- a/pkg/dns/provider/state_zone.go +++ b/pkg/dns/provider/state_zone.go @@ -203,7 +203,7 @@ func (this *state) reconcileZone(logger logger.LogContext, req *zoneReconciliati if changeResult.Modified { if accepted, delay := this.tryAcceptProviderRateLimiter(logger, e); !accepted { req.zone.nextTrigger = delay - changes.PseudoApply(e.DNSSetName()) + changes.PseudoApply(e.DNSSetName(), spec) logger.Infof("rate limited %s, delay %.1f s", e.ObjectName(), delay.Seconds()) statusUpdate.Throttled() if delay.Seconds() > 2 { diff --git a/pkg/dns/records.go b/pkg/dns/records.go index 9dbb2f994..263562ff2 100644 --- a/pkg/dns/records.go +++ b/pkg/dns/records.go @@ -18,7 +18,6 @@ package dns import ( "fmt" - "reflect" "strings" ) @@ -56,26 +55,18 @@ func (this *Record) Clone() *Record { } type RecordSet struct { - Type string - TTL int64 - IgnoreTTL bool - RoutingPolicy *RoutingPolicy - Records Records + Type string + TTL int64 + IgnoreTTL bool + Records Records } func NewRecordSet(rtype string, ttl int64, records []*Record) *RecordSet { - return NewRecordSetEx(rtype, ttl, nil, records) -} - -func NewRecordSetEx(rtype string, ttl int64, policy *RoutingPolicy, records []*Record) *RecordSet { - if records == nil { - records = Records{} - } - return &RecordSet{Type: rtype, TTL: ttl, RoutingPolicy: policy, Records: records} + return &RecordSet{Type: rtype, TTL: ttl, Records: records} } func (this *RecordSet) Clone() *RecordSet { - set := &RecordSet{Type: this.Type, TTL: this.TTL, IgnoreTTL: this.IgnoreTTL, RoutingPolicy: this.RoutingPolicy.Clone()} + set := &RecordSet{Type: this.Type, TTL: this.TTL, IgnoreTTL: this.IgnoreTTL} for _, r := range this.Records 
{ set.Records = append(set.Records, r.Clone()) } @@ -117,9 +108,6 @@ func (this *RecordSet) Match(set *RecordSet) bool { if !this.IgnoreTTL && !set.IgnoreTTL && this.TTL != set.TTL { return false } - if !reflect.DeepEqual(this.RoutingPolicy, set.RoutingPolicy) { - return false - } for _, r := range this.Records { found := false @@ -207,7 +195,7 @@ func newAttrRecord(name, value string) *Record { return &Record{Value: newAttrValue(name, value)} } -func newAttrRecordSet(ty string, name, value string, routingPolicy *RoutingPolicy) *RecordSet { +func newAttrRecordSet(ty string, name, value string) *RecordSet { records := []*Record{newAttrRecord(name, value)} - return &RecordSet{Type: ty, TTL: 600, IgnoreTTL: false, RoutingPolicy: routingPolicy, Records: records} + return &RecordSet{Type: ty, TTL: 600, IgnoreTTL: false, Records: records} } diff --git a/pkg/dns/source/defaults.go b/pkg/dns/source/defaults.go index 5d35c1d4c..84fc18ef5 100644 --- a/pkg/dns/source/defaults.go +++ b/pkg/dns/source/defaults.go @@ -93,7 +93,7 @@ func (this *DefaultDNSSource) CreateDNSFeedback(obj resources.Object) DNSFeedbac func (this *DefaultDNSSource) GetDNSInfo(logger logger.LogContext, obj resources.Object, current *DNSCurrentState) (*DNSInfo, error) { info := &DNSInfo{} - info.Names = dns.NewDNSNameSetFromStringSet(current.AnnotatedNames, current.SetIdentifier()) + info.Names = dns.NewDNSNameSetFromStringSet(current.AnnotatedNames, current.GetSetIdentifier()) tgts, txts, err := this.handler(logger, obj, info.Names) info.Targets = tgts info.Text = txts diff --git a/pkg/dns/source/interface.go b/pkg/dns/source/interface.go index dea05c0b8..6f94720e4 100644 --- a/pkg/dns/source/interface.go +++ b/pkg/dns/source/interface.go @@ -81,7 +81,7 @@ type DNSCurrentState struct { AnnotatedRoutingPolicy *v1alpha1.RoutingPolicy } -func (s *DNSCurrentState) SetIdentifier() string { +func (s *DNSCurrentState) GetSetIdentifier() string { if s.AnnotatedRoutingPolicy == nil { return "" } diff --git a/pkg/server/remote/common/remote.pb.go b/pkg/server/remote/common/remote.pb.go index 6f813d6c2..f6440b78f 100644 --- a/pkg/server/remote/common/remote.pb.go +++ b/pkg/server/remote/common/remote.pb.go @@ -66,7 +66,7 @@ func (x ChangeRequest_ActionType) Number() protoreflect.EnumNumber { // Deprecated: Use ChangeRequest_ActionType.Descriptor instead. func (ChangeRequest_ActionType) EnumDescriptor() ([]byte, []int) { - return file_pkg_server_remote_common_remote_proto_rawDescGZIP(), []int{11, 0} + return file_pkg_server_remote_common_remote_proto_rawDescGZIP(), []int{12, 0} } type LogEntry_Level int32 @@ -118,7 +118,7 @@ func (x LogEntry_Level) Number() protoreflect.EnumNumber { // Deprecated: Use LogEntry_Level.Descriptor instead. func (LogEntry_Level) EnumDescriptor() ([]byte, []int) { - return file_pkg_server_remote_common_remote_proto_rawDescGZIP(), []int{12, 0} + return file_pkg_server_remote_common_remote_proto_rawDescGZIP(), []int{13, 0} } type ChangeResponse_State int32 @@ -173,7 +173,7 @@ func (x ChangeResponse_State) Number() protoreflect.EnumNumber { // Deprecated: Use ChangeResponse_State.Descriptor instead. 
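Tying the reworked `pkg/dns` API together: after this patch the routing policy hangs off the `DNSSet` (and the entry's target spec) rather than off every `RecordSet` or `ChangeRequest`. A rough usage sketch, assuming the repository's usual module path and that the Go `RoutingPolicy` struct mirrors the `type`/`parameters` shape of the generated message below:

```go
package main

import (
	"fmt"

	"github.com/gardener/external-dns-management/pkg/dns"
)

func main() {
	// Hypothetical weighted CNAME record set using the reworked API: the
	// routing policy is attached once to the DNSSet instead of per RecordSet.
	name := dns.DNSSetName{DNSName: "my.service.example.com", SetIdentifier: "instance-a"}
	policy := &dns.RoutingPolicy{Type: "weighted", Parameters: map[string]string{"weight": "90"}}

	set := dns.NewDNSSet(name, policy)
	set.SetRecordSet("CNAME", 120, "instance-a.service.example.com")

	fmt.Printf("%s -> %s (routing policy: %s)\n",
		set.Name.DNSName, set.Sets["CNAME"].RecordString(), set.RoutingPolicy.Type)
}
```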
func (ChangeResponse_State) EnumDescriptor() ([]byte, []int) { - return file_pkg_server_remote_common_remote_proto_rawDescGZIP(), []int{14, 0} + return file_pkg_server_remote_common_remote_proto_rawDescGZIP(), []int{15, 0} } type LoginRequest struct { @@ -535,10 +535,9 @@ type RecordSet struct { sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields - Type string `protobuf:"bytes,1,opt,name=type,proto3" json:"type,omitempty"` - Ttl int32 `protobuf:"varint,2,opt,name=ttl,proto3" json:"ttl,omitempty"` - Record []*RecordSet_Record `protobuf:"bytes,3,rep,name=record,proto3" json:"record,omitempty"` - RoutingPolicy *RecordSet_RoutingPolicy `protobuf:"bytes,4,opt,name=routing_policy,json=routingPolicy,proto3" json:"routing_policy,omitempty"` + Type string `protobuf:"bytes,1,opt,name=type,proto3" json:"type,omitempty"` + Ttl int32 `protobuf:"varint,2,opt,name=ttl,proto3" json:"ttl,omitempty"` + Record []*RecordSet_Record `protobuf:"bytes,3,rep,name=record,proto3" json:"record,omitempty"` } func (x *RecordSet) Reset() { @@ -594,9 +593,57 @@ func (x *RecordSet) GetRecord() []*RecordSet_Record { return nil } -func (x *RecordSet) GetRoutingPolicy() *RecordSet_RoutingPolicy { +type RoutingPolicy struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Type string `protobuf:"bytes,1,opt,name=type,proto3" json:"type,omitempty"` + Parameters map[string]string `protobuf:"bytes,2,rep,name=parameters,proto3" json:"parameters,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` +} + +func (x *RoutingPolicy) Reset() { + *x = RoutingPolicy{} + if protoimpl.UnsafeEnabled { + mi := &file_pkg_server_remote_common_remote_proto_msgTypes[7] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *RoutingPolicy) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*RoutingPolicy) ProtoMessage() {} + +func (x *RoutingPolicy) ProtoReflect() protoreflect.Message { + mi := &file_pkg_server_remote_common_remote_proto_msgTypes[7] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use RoutingPolicy.ProtoReflect.Descriptor instead. 
+func (*RoutingPolicy) Descriptor() ([]byte, []int) { + return file_pkg_server_remote_common_remote_proto_rawDescGZIP(), []int{7} +} + +func (x *RoutingPolicy) GetType() string { if x != nil { - return x.RoutingPolicy + return x.Type + } + return "" +} + +func (x *RoutingPolicy) GetParameters() map[string]string { + if x != nil { + return x.Parameters } return nil } @@ -610,12 +657,13 @@ type DNSSet struct { UpdateGroup string `protobuf:"bytes,2,opt,name=update_group,json=updateGroup,proto3" json:"update_group,omitempty"` Records map[string]*RecordSet `protobuf:"bytes,3,rep,name=records,proto3" json:"records,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` SetIdentifier string `protobuf:"bytes,4,opt,name=set_identifier,json=setIdentifier,proto3" json:"set_identifier,omitempty"` + RoutingPolicy *RoutingPolicy `protobuf:"bytes,5,opt,name=routing_policy,json=routingPolicy,proto3" json:"routing_policy,omitempty"` } func (x *DNSSet) Reset() { *x = DNSSet{} if protoimpl.UnsafeEnabled { - mi := &file_pkg_server_remote_common_remote_proto_msgTypes[7] + mi := &file_pkg_server_remote_common_remote_proto_msgTypes[8] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -628,7 +676,7 @@ func (x *DNSSet) String() string { func (*DNSSet) ProtoMessage() {} func (x *DNSSet) ProtoReflect() protoreflect.Message { - mi := &file_pkg_server_remote_common_remote_proto_msgTypes[7] + mi := &file_pkg_server_remote_common_remote_proto_msgTypes[8] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -641,7 +689,7 @@ func (x *DNSSet) ProtoReflect() protoreflect.Message { // Deprecated: Use DNSSet.ProtoReflect.Descriptor instead. 
func (*DNSSet) Descriptor() ([]byte, []int) { - return file_pkg_server_remote_common_remote_proto_rawDescGZIP(), []int{7} + return file_pkg_server_remote_common_remote_proto_rawDescGZIP(), []int{8} } func (x *DNSSet) GetDnsName() string { @@ -672,22 +720,30 @@ func (x *DNSSet) GetSetIdentifier() string { return "" } +func (x *DNSSet) GetRoutingPolicy() *RoutingPolicy { + if x != nil { + return x.RoutingPolicy + } + return nil +} + type PartialDNSSet struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields - DnsName string `protobuf:"bytes,1,opt,name=dns_name,json=dnsName,proto3" json:"dns_name,omitempty"` - UpdateGroup string `protobuf:"bytes,2,opt,name=update_group,json=updateGroup,proto3" json:"update_group,omitempty"` - RecordType string `protobuf:"bytes,3,opt,name=record_type,json=recordType,proto3" json:"record_type,omitempty"` - RecordSet *RecordSet `protobuf:"bytes,4,opt,name=record_set,json=recordSet,proto3" json:"record_set,omitempty"` - SetIdentifier string `protobuf:"bytes,5,opt,name=set_identifier,json=setIdentifier,proto3" json:"set_identifier,omitempty"` + DnsName string `protobuf:"bytes,1,opt,name=dns_name,json=dnsName,proto3" json:"dns_name,omitempty"` + UpdateGroup string `protobuf:"bytes,2,opt,name=update_group,json=updateGroup,proto3" json:"update_group,omitempty"` + RecordType string `protobuf:"bytes,3,opt,name=record_type,json=recordType,proto3" json:"record_type,omitempty"` + RecordSet *RecordSet `protobuf:"bytes,4,opt,name=record_set,json=recordSet,proto3" json:"record_set,omitempty"` + SetIdentifier string `protobuf:"bytes,5,opt,name=set_identifier,json=setIdentifier,proto3" json:"set_identifier,omitempty"` + RoutingPolicy *RoutingPolicy `protobuf:"bytes,6,opt,name=routing_policy,json=routingPolicy,proto3" json:"routing_policy,omitempty"` } func (x *PartialDNSSet) Reset() { *x = PartialDNSSet{} if protoimpl.UnsafeEnabled { - mi := &file_pkg_server_remote_common_remote_proto_msgTypes[8] + mi := &file_pkg_server_remote_common_remote_proto_msgTypes[9] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -700,7 +756,7 @@ func (x *PartialDNSSet) String() string { func (*PartialDNSSet) ProtoMessage() {} func (x *PartialDNSSet) ProtoReflect() protoreflect.Message { - mi := &file_pkg_server_remote_common_remote_proto_msgTypes[8] + mi := &file_pkg_server_remote_common_remote_proto_msgTypes[9] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -713,7 +769,7 @@ func (x *PartialDNSSet) ProtoReflect() protoreflect.Message { // Deprecated: Use PartialDNSSet.ProtoReflect.Descriptor instead. 
func (*PartialDNSSet) Descriptor() ([]byte, []int) { - return file_pkg_server_remote_common_remote_proto_rawDescGZIP(), []int{8} + return file_pkg_server_remote_common_remote_proto_rawDescGZIP(), []int{9} } func (x *PartialDNSSet) GetDnsName() string { @@ -751,6 +807,13 @@ func (x *PartialDNSSet) GetSetIdentifier() string { return "" } +func (x *PartialDNSSet) GetRoutingPolicy() *RoutingPolicy { + if x != nil { + return x.RoutingPolicy + } + return nil +} + type ZoneState struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache @@ -763,7 +826,7 @@ type ZoneState struct { func (x *ZoneState) Reset() { *x = ZoneState{} if protoimpl.UnsafeEnabled { - mi := &file_pkg_server_remote_common_remote_proto_msgTypes[9] + mi := &file_pkg_server_remote_common_remote_proto_msgTypes[10] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -776,7 +839,7 @@ func (x *ZoneState) String() string { func (*ZoneState) ProtoMessage() {} func (x *ZoneState) ProtoReflect() protoreflect.Message { - mi := &file_pkg_server_remote_common_remote_proto_msgTypes[9] + mi := &file_pkg_server_remote_common_remote_proto_msgTypes[10] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -789,7 +852,7 @@ func (x *ZoneState) ProtoReflect() protoreflect.Message { // Deprecated: Use ZoneState.ProtoReflect.Descriptor instead. func (*ZoneState) Descriptor() ([]byte, []int) { - return file_pkg_server_remote_common_remote_proto_rawDescGZIP(), []int{9} + return file_pkg_server_remote_common_remote_proto_rawDescGZIP(), []int{10} } func (x *ZoneState) GetKey() string { @@ -819,7 +882,7 @@ type ExecuteRequest struct { func (x *ExecuteRequest) Reset() { *x = ExecuteRequest{} if protoimpl.UnsafeEnabled { - mi := &file_pkg_server_remote_common_remote_proto_msgTypes[10] + mi := &file_pkg_server_remote_common_remote_proto_msgTypes[11] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -832,7 +895,7 @@ func (x *ExecuteRequest) String() string { func (*ExecuteRequest) ProtoMessage() {} func (x *ExecuteRequest) ProtoReflect() protoreflect.Message { - mi := &file_pkg_server_remote_common_remote_proto_msgTypes[10] + mi := &file_pkg_server_remote_common_remote_proto_msgTypes[11] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -845,7 +908,7 @@ func (x *ExecuteRequest) ProtoReflect() protoreflect.Message { // Deprecated: Use ExecuteRequest.ProtoReflect.Descriptor instead. 
func (*ExecuteRequest) Descriptor() ([]byte, []int) { - return file_pkg_server_remote_common_remote_proto_rawDescGZIP(), []int{10} + return file_pkg_server_remote_common_remote_proto_rawDescGZIP(), []int{11} } func (x *ExecuteRequest) GetToken() string { @@ -881,7 +944,7 @@ type ChangeRequest struct { func (x *ChangeRequest) Reset() { *x = ChangeRequest{} if protoimpl.UnsafeEnabled { - mi := &file_pkg_server_remote_common_remote_proto_msgTypes[11] + mi := &file_pkg_server_remote_common_remote_proto_msgTypes[12] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -894,7 +957,7 @@ func (x *ChangeRequest) String() string { func (*ChangeRequest) ProtoMessage() {} func (x *ChangeRequest) ProtoReflect() protoreflect.Message { - mi := &file_pkg_server_remote_common_remote_proto_msgTypes[11] + mi := &file_pkg_server_remote_common_remote_proto_msgTypes[12] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -907,7 +970,7 @@ func (x *ChangeRequest) ProtoReflect() protoreflect.Message { // Deprecated: Use ChangeRequest.ProtoReflect.Descriptor instead. func (*ChangeRequest) Descriptor() ([]byte, []int) { - return file_pkg_server_remote_common_remote_proto_rawDescGZIP(), []int{11} + return file_pkg_server_remote_common_remote_proto_rawDescGZIP(), []int{12} } func (x *ChangeRequest) GetAction() ChangeRequest_ActionType { @@ -937,7 +1000,7 @@ type LogEntry struct { func (x *LogEntry) Reset() { *x = LogEntry{} if protoimpl.UnsafeEnabled { - mi := &file_pkg_server_remote_common_remote_proto_msgTypes[12] + mi := &file_pkg_server_remote_common_remote_proto_msgTypes[13] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -950,7 +1013,7 @@ func (x *LogEntry) String() string { func (*LogEntry) ProtoMessage() {} func (x *LogEntry) ProtoReflect() protoreflect.Message { - mi := &file_pkg_server_remote_common_remote_proto_msgTypes[12] + mi := &file_pkg_server_remote_common_remote_proto_msgTypes[13] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -963,7 +1026,7 @@ func (x *LogEntry) ProtoReflect() protoreflect.Message { // Deprecated: Use LogEntry.ProtoReflect.Descriptor instead. func (*LogEntry) Descriptor() ([]byte, []int) { - return file_pkg_server_remote_common_remote_proto_rawDescGZIP(), []int{12} + return file_pkg_server_remote_common_remote_proto_rawDescGZIP(), []int{13} } func (x *LogEntry) GetTimestamp() int64 { @@ -999,7 +1062,7 @@ type ExecuteResponse struct { func (x *ExecuteResponse) Reset() { *x = ExecuteResponse{} if protoimpl.UnsafeEnabled { - mi := &file_pkg_server_remote_common_remote_proto_msgTypes[13] + mi := &file_pkg_server_remote_common_remote_proto_msgTypes[14] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -1012,7 +1075,7 @@ func (x *ExecuteResponse) String() string { func (*ExecuteResponse) ProtoMessage() {} func (x *ExecuteResponse) ProtoReflect() protoreflect.Message { - mi := &file_pkg_server_remote_common_remote_proto_msgTypes[13] + mi := &file_pkg_server_remote_common_remote_proto_msgTypes[14] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -1025,7 +1088,7 @@ func (x *ExecuteResponse) ProtoReflect() protoreflect.Message { // Deprecated: Use ExecuteResponse.ProtoReflect.Descriptor instead. 
func (*ExecuteResponse) Descriptor() ([]byte, []int) { - return file_pkg_server_remote_common_remote_proto_rawDescGZIP(), []int{13} + return file_pkg_server_remote_common_remote_proto_rawDescGZIP(), []int{14} } func (x *ExecuteResponse) GetChangeResponse() []*ChangeResponse { @@ -1054,7 +1117,7 @@ type ChangeResponse struct { func (x *ChangeResponse) Reset() { *x = ChangeResponse{} if protoimpl.UnsafeEnabled { - mi := &file_pkg_server_remote_common_remote_proto_msgTypes[14] + mi := &file_pkg_server_remote_common_remote_proto_msgTypes[15] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -1067,7 +1130,7 @@ func (x *ChangeResponse) String() string { func (*ChangeResponse) ProtoMessage() {} func (x *ChangeResponse) ProtoReflect() protoreflect.Message { - mi := &file_pkg_server_remote_common_remote_proto_msgTypes[14] + mi := &file_pkg_server_remote_common_remote_proto_msgTypes[15] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -1080,7 +1143,7 @@ func (x *ChangeResponse) ProtoReflect() protoreflect.Message { // Deprecated: Use ChangeResponse.ProtoReflect.Descriptor instead. func (*ChangeResponse) Descriptor() ([]byte, []int) { - return file_pkg_server_remote_common_remote_proto_rawDescGZIP(), []int{14} + return file_pkg_server_remote_common_remote_proto_rawDescGZIP(), []int{15} } func (x *ChangeResponse) GetState() ChangeResponse_State { @@ -1108,7 +1171,7 @@ type RecordSet_Record struct { func (x *RecordSet_Record) Reset() { *x = RecordSet_Record{} if protoimpl.UnsafeEnabled { - mi := &file_pkg_server_remote_common_remote_proto_msgTypes[15] + mi := &file_pkg_server_remote_common_remote_proto_msgTypes[16] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -1121,7 +1184,7 @@ func (x *RecordSet_Record) String() string { func (*RecordSet_Record) ProtoMessage() {} func (x *RecordSet_Record) ProtoReflect() protoreflect.Message { - mi := &file_pkg_server_remote_common_remote_proto_msgTypes[15] + mi := &file_pkg_server_remote_common_remote_proto_msgTypes[16] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -1144,61 +1207,6 @@ func (x *RecordSet_Record) GetValue() string { return "" } -type RecordSet_RoutingPolicy struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - Type string `protobuf:"bytes,1,opt,name=type,proto3" json:"type,omitempty"` - Parameters map[string]string `protobuf:"bytes,2,rep,name=parameters,proto3" json:"parameters,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` -} - -func (x *RecordSet_RoutingPolicy) Reset() { - *x = RecordSet_RoutingPolicy{} - if protoimpl.UnsafeEnabled { - mi := &file_pkg_server_remote_common_remote_proto_msgTypes[16] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *RecordSet_RoutingPolicy) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*RecordSet_RoutingPolicy) ProtoMessage() {} - -func (x *RecordSet_RoutingPolicy) ProtoReflect() protoreflect.Message { - mi := &file_pkg_server_remote_common_remote_proto_msgTypes[16] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use 
RecordSet_RoutingPolicy.ProtoReflect.Descriptor instead. -func (*RecordSet_RoutingPolicy) Descriptor() ([]byte, []int) { - return file_pkg_server_remote_common_remote_proto_rawDescGZIP(), []int{6, 1} -} - -func (x *RecordSet_RoutingPolicy) GetType() string { - if x != nil { - return x.Type - } - return "" -} - -func (x *RecordSet_RoutingPolicy) GetParameters() map[string]string { - if x != nil { - return x.Parameters - } - return nil -} - var File_pkg_server_remote_common_remote_proto protoreflect.FileDescriptor var file_pkg_server_remote_common_remote_proto_rawDesc = []byte{ @@ -1239,140 +1247,143 @@ var file_pkg_server_remote_common_remote_proto_rawDesc = []byte{ 0x65, 0x53, 0x74, 0x61, 0x74, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x14, 0x0a, 0x05, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x12, 0x16, 0x0a, 0x06, 0x7a, 0x6f, 0x6e, 0x65, 0x69, 0x64, 0x18, 0x02, 0x20, - 0x01, 0x28, 0x09, 0x52, 0x06, 0x7a, 0x6f, 0x6e, 0x65, 0x69, 0x64, 0x22, 0x81, 0x03, 0x0a, 0x09, + 0x01, 0x28, 0x09, 0x52, 0x06, 0x7a, 0x6f, 0x6e, 0x65, 0x69, 0x64, 0x22, 0x83, 0x01, 0x0a, 0x09, 0x52, 0x65, 0x63, 0x6f, 0x72, 0x64, 0x53, 0x65, 0x74, 0x12, 0x12, 0x0a, 0x04, 0x74, 0x79, 0x70, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x74, 0x79, 0x70, 0x65, 0x12, 0x10, 0x0a, 0x03, 0x74, 0x74, 0x6c, 0x18, 0x02, 0x20, 0x01, 0x28, 0x05, 0x52, 0x03, 0x74, 0x74, 0x6c, 0x12, 0x30, 0x0a, 0x06, 0x72, 0x65, 0x63, 0x6f, 0x72, 0x64, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x18, 0x2e, 0x72, 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x2e, 0x52, 0x65, 0x63, 0x6f, 0x72, 0x64, 0x53, 0x65, 0x74, 0x2e, 0x52, 0x65, 0x63, 0x6f, 0x72, 0x64, 0x52, 0x06, 0x72, 0x65, 0x63, 0x6f, 0x72, - 0x64, 0x12, 0x46, 0x0a, 0x0e, 0x72, 0x6f, 0x75, 0x74, 0x69, 0x6e, 0x67, 0x5f, 0x70, 0x6f, 0x6c, - 0x69, 0x63, 0x79, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1f, 0x2e, 0x72, 0x65, 0x6d, 0x6f, - 0x74, 0x65, 0x2e, 0x52, 0x65, 0x63, 0x6f, 0x72, 0x64, 0x53, 0x65, 0x74, 0x2e, 0x52, 0x6f, 0x75, + 0x64, 0x1a, 0x1e, 0x0a, 0x06, 0x52, 0x65, 0x63, 0x6f, 0x72, 0x64, 0x12, 0x14, 0x0a, 0x05, 0x76, + 0x61, 0x6c, 0x75, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, + 0x65, 0x22, 0xa9, 0x01, 0x0a, 0x0d, 0x52, 0x6f, 0x75, 0x74, 0x69, 0x6e, 0x67, 0x50, 0x6f, 0x6c, + 0x69, 0x63, 0x79, 0x12, 0x12, 0x0a, 0x04, 0x74, 0x79, 0x70, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, + 0x09, 0x52, 0x04, 0x74, 0x79, 0x70, 0x65, 0x12, 0x45, 0x0a, 0x0a, 0x70, 0x61, 0x72, 0x61, 0x6d, + 0x65, 0x74, 0x65, 0x72, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x25, 0x2e, 0x72, 0x65, + 0x6d, 0x6f, 0x74, 0x65, 0x2e, 0x52, 0x6f, 0x75, 0x74, 0x69, 0x6e, 0x67, 0x50, 0x6f, 0x6c, 0x69, + 0x63, 0x79, 0x2e, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x65, 0x74, 0x65, 0x72, 0x73, 0x45, 0x6e, 0x74, + 0x72, 0x79, 0x52, 0x0a, 0x70, 0x61, 0x72, 0x61, 0x6d, 0x65, 0x74, 0x65, 0x72, 0x73, 0x1a, 0x3d, + 0x0a, 0x0f, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x65, 0x74, 0x65, 0x72, 0x73, 0x45, 0x6e, 0x74, 0x72, + 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, + 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, 0xb1, 0x02, + 0x0a, 0x06, 0x44, 0x4e, 0x53, 0x53, 0x65, 0x74, 0x12, 0x19, 0x0a, 0x08, 0x64, 0x6e, 0x73, 0x5f, + 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x64, 0x6e, 0x73, 0x4e, + 0x61, 0x6d, 0x65, 0x12, 0x21, 0x0a, 0x0c, 0x75, 0x70, 
0x64, 0x61, 0x74, 0x65, 0x5f, 0x67, 0x72, + 0x6f, 0x75, 0x70, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x75, 0x70, 0x64, 0x61, 0x74, + 0x65, 0x47, 0x72, 0x6f, 0x75, 0x70, 0x12, 0x35, 0x0a, 0x07, 0x72, 0x65, 0x63, 0x6f, 0x72, 0x64, + 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x72, 0x65, 0x6d, 0x6f, 0x74, 0x65, + 0x2e, 0x44, 0x4e, 0x53, 0x53, 0x65, 0x74, 0x2e, 0x52, 0x65, 0x63, 0x6f, 0x72, 0x64, 0x73, 0x45, + 0x6e, 0x74, 0x72, 0x79, 0x52, 0x07, 0x72, 0x65, 0x63, 0x6f, 0x72, 0x64, 0x73, 0x12, 0x25, 0x0a, + 0x0e, 0x73, 0x65, 0x74, 0x5f, 0x69, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x66, 0x69, 0x65, 0x72, 0x18, + 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0d, 0x73, 0x65, 0x74, 0x49, 0x64, 0x65, 0x6e, 0x74, 0x69, + 0x66, 0x69, 0x65, 0x72, 0x12, 0x3c, 0x0a, 0x0e, 0x72, 0x6f, 0x75, 0x74, 0x69, 0x6e, 0x67, 0x5f, + 0x70, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x72, + 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x2e, 0x52, 0x6f, 0x75, 0x74, 0x69, 0x6e, 0x67, 0x50, 0x6f, 0x6c, + 0x69, 0x63, 0x79, 0x52, 0x0d, 0x72, 0x6f, 0x75, 0x74, 0x69, 0x6e, 0x67, 0x50, 0x6f, 0x6c, 0x69, + 0x63, 0x79, 0x1a, 0x4d, 0x0a, 0x0c, 0x52, 0x65, 0x63, 0x6f, 0x72, 0x64, 0x73, 0x45, 0x6e, 0x74, + 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x03, 0x6b, 0x65, 0x79, 0x12, 0x27, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, + 0x01, 0x28, 0x0b, 0x32, 0x11, 0x2e, 0x72, 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x2e, 0x52, 0x65, 0x63, + 0x6f, 0x72, 0x64, 0x53, 0x65, 0x74, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, + 0x01, 0x22, 0x85, 0x02, 0x0a, 0x0d, 0x50, 0x61, 0x72, 0x74, 0x69, 0x61, 0x6c, 0x44, 0x4e, 0x53, + 0x53, 0x65, 0x74, 0x12, 0x19, 0x0a, 0x08, 0x64, 0x6e, 0x73, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, + 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x64, 0x6e, 0x73, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x21, + 0x0a, 0x0c, 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, 0x5f, 0x67, 0x72, 0x6f, 0x75, 0x70, 0x18, 0x02, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, 0x47, 0x72, 0x6f, 0x75, + 0x70, 0x12, 0x1f, 0x0a, 0x0b, 0x72, 0x65, 0x63, 0x6f, 0x72, 0x64, 0x5f, 0x74, 0x79, 0x70, 0x65, + 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x72, 0x65, 0x63, 0x6f, 0x72, 0x64, 0x54, 0x79, + 0x70, 0x65, 0x12, 0x30, 0x0a, 0x0a, 0x72, 0x65, 0x63, 0x6f, 0x72, 0x64, 0x5f, 0x73, 0x65, 0x74, + 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x11, 0x2e, 0x72, 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x2e, + 0x52, 0x65, 0x63, 0x6f, 0x72, 0x64, 0x53, 0x65, 0x74, 0x52, 0x09, 0x72, 0x65, 0x63, 0x6f, 0x72, + 0x64, 0x53, 0x65, 0x74, 0x12, 0x25, 0x0a, 0x0e, 0x73, 0x65, 0x74, 0x5f, 0x69, 0x64, 0x65, 0x6e, + 0x74, 0x69, 0x66, 0x69, 0x65, 0x72, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0d, 0x73, 0x65, + 0x74, 0x49, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x66, 0x69, 0x65, 0x72, 0x12, 0x3c, 0x0a, 0x0e, 0x72, + 0x6f, 0x75, 0x74, 0x69, 0x6e, 0x67, 0x5f, 0x70, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x18, 0x06, 0x20, + 0x01, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x72, 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x2e, 0x52, 0x6f, 0x75, 0x74, 0x69, 0x6e, 0x67, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x52, 0x0d, 0x72, 0x6f, 0x75, 0x74, - 0x69, 0x6e, 0x67, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x1a, 0x1e, 0x0a, 0x06, 0x52, 0x65, 0x63, - 0x6f, 0x72, 0x64, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x01, 0x20, 0x01, - 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x1a, 0xb3, 0x01, 0x0a, 0x0d, 0x52, 0x6f, - 0x75, 0x74, 0x69, 0x6e, 0x67, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x12, 0x12, 
0x0a, 0x04, 0x74, - 0x79, 0x70, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x74, 0x79, 0x70, 0x65, 0x12, - 0x4f, 0x0a, 0x0a, 0x70, 0x61, 0x72, 0x61, 0x6d, 0x65, 0x74, 0x65, 0x72, 0x73, 0x18, 0x02, 0x20, - 0x03, 0x28, 0x0b, 0x32, 0x2f, 0x2e, 0x72, 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x2e, 0x52, 0x65, 0x63, - 0x6f, 0x72, 0x64, 0x53, 0x65, 0x74, 0x2e, 0x52, 0x6f, 0x75, 0x74, 0x69, 0x6e, 0x67, 0x50, 0x6f, - 0x6c, 0x69, 0x63, 0x79, 0x2e, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x65, 0x74, 0x65, 0x72, 0x73, 0x45, - 0x6e, 0x74, 0x72, 0x79, 0x52, 0x0a, 0x70, 0x61, 0x72, 0x61, 0x6d, 0x65, 0x74, 0x65, 0x72, 0x73, - 0x1a, 0x3d, 0x0a, 0x0f, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x65, 0x74, 0x65, 0x72, 0x73, 0x45, 0x6e, - 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, - 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, - 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, - 0xf3, 0x01, 0x0a, 0x06, 0x44, 0x4e, 0x53, 0x53, 0x65, 0x74, 0x12, 0x19, 0x0a, 0x08, 0x64, 0x6e, - 0x73, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x64, 0x6e, - 0x73, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x21, 0x0a, 0x0c, 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, 0x5f, - 0x67, 0x72, 0x6f, 0x75, 0x70, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x75, 0x70, 0x64, - 0x61, 0x74, 0x65, 0x47, 0x72, 0x6f, 0x75, 0x70, 0x12, 0x35, 0x0a, 0x07, 0x72, 0x65, 0x63, 0x6f, - 0x72, 0x64, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x72, 0x65, 0x6d, 0x6f, - 0x74, 0x65, 0x2e, 0x44, 0x4e, 0x53, 0x53, 0x65, 0x74, 0x2e, 0x52, 0x65, 0x63, 0x6f, 0x72, 0x64, - 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x07, 0x72, 0x65, 0x63, 0x6f, 0x72, 0x64, 0x73, 0x12, - 0x25, 0x0a, 0x0e, 0x73, 0x65, 0x74, 0x5f, 0x69, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x66, 0x69, 0x65, - 0x72, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0d, 0x73, 0x65, 0x74, 0x49, 0x64, 0x65, 0x6e, - 0x74, 0x69, 0x66, 0x69, 0x65, 0x72, 0x1a, 0x4d, 0x0a, 0x0c, 0x52, 0x65, 0x63, 0x6f, 0x72, 0x64, - 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, - 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x27, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, - 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x11, 0x2e, 0x72, 0x65, 0x6d, 0x6f, 0x74, 0x65, - 0x2e, 0x52, 0x65, 0x63, 0x6f, 0x72, 0x64, 0x53, 0x65, 0x74, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, - 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, 0xc7, 0x01, 0x0a, 0x0d, 0x50, 0x61, 0x72, 0x74, 0x69, 0x61, - 0x6c, 0x44, 0x4e, 0x53, 0x53, 0x65, 0x74, 0x12, 0x19, 0x0a, 0x08, 0x64, 0x6e, 0x73, 0x5f, 0x6e, - 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x64, 0x6e, 0x73, 0x4e, 0x61, - 0x6d, 0x65, 0x12, 0x21, 0x0a, 0x0c, 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, 0x5f, 0x67, 0x72, 0x6f, - 0x75, 0x70, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, - 0x47, 0x72, 0x6f, 0x75, 0x70, 0x12, 0x1f, 0x0a, 0x0b, 0x72, 0x65, 0x63, 0x6f, 0x72, 0x64, 0x5f, - 0x74, 0x79, 0x70, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x72, 0x65, 0x63, 0x6f, - 0x72, 0x64, 0x54, 0x79, 0x70, 0x65, 0x12, 0x30, 0x0a, 0x0a, 0x72, 0x65, 0x63, 0x6f, 0x72, 0x64, - 0x5f, 0x73, 0x65, 0x74, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x11, 0x2e, 0x72, 0x65, 0x6d, - 0x6f, 0x74, 0x65, 0x2e, 0x52, 0x65, 0x63, 0x6f, 0x72, 0x64, 0x53, 0x65, 0x74, 0x52, 0x09, 0x72, - 0x65, 0x63, 0x6f, 0x72, 0x64, 0x53, 0x65, 0x74, 0x12, 0x25, 0x0a, 0x0e, 0x73, 0x65, 0x74, 0x5f, - 0x69, 
0x64, 0x65, 0x6e, 0x74, 0x69, 0x66, 0x69, 0x65, 0x72, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, - 0x52, 0x0d, 0x73, 0x65, 0x74, 0x49, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x66, 0x69, 0x65, 0x72, 0x22, - 0xa4, 0x01, 0x0a, 0x09, 0x5a, 0x6f, 0x6e, 0x65, 0x53, 0x74, 0x61, 0x74, 0x65, 0x12, 0x10, 0x0a, - 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, - 0x39, 0x0a, 0x08, 0x64, 0x6e, 0x73, 0x5f, 0x73, 0x65, 0x74, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, - 0x0b, 0x32, 0x1e, 0x2e, 0x72, 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x2e, 0x5a, 0x6f, 0x6e, 0x65, 0x53, - 0x74, 0x61, 0x74, 0x65, 0x2e, 0x44, 0x6e, 0x73, 0x53, 0x65, 0x74, 0x73, 0x45, 0x6e, 0x74, 0x72, - 0x79, 0x52, 0x07, 0x64, 0x6e, 0x73, 0x53, 0x65, 0x74, 0x73, 0x1a, 0x4a, 0x0a, 0x0c, 0x44, 0x6e, - 0x73, 0x53, 0x65, 0x74, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, - 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x24, 0x0a, 0x05, - 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0e, 0x2e, 0x72, 0x65, - 0x6d, 0x6f, 0x74, 0x65, 0x2e, 0x44, 0x4e, 0x53, 0x53, 0x65, 0x74, 0x52, 0x05, 0x76, 0x61, 0x6c, - 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, 0x7c, 0x0a, 0x0e, 0x45, 0x78, 0x65, 0x63, 0x75, 0x74, - 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x14, 0x0a, 0x05, 0x74, 0x6f, 0x6b, 0x65, - 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x12, 0x16, - 0x0a, 0x06, 0x7a, 0x6f, 0x6e, 0x65, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, - 0x7a, 0x6f, 0x6e, 0x65, 0x69, 0x64, 0x12, 0x3c, 0x0a, 0x0e, 0x63, 0x68, 0x61, 0x6e, 0x67, 0x65, - 0x5f, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x15, + 0x69, 0x6e, 0x67, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x22, 0xa4, 0x01, 0x0a, 0x09, 0x5a, 0x6f, + 0x6e, 0x65, 0x53, 0x74, 0x61, 0x74, 0x65, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x39, 0x0a, 0x08, 0x64, 0x6e, 0x73, + 0x5f, 0x73, 0x65, 0x74, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1e, 0x2e, 0x72, 0x65, + 0x6d, 0x6f, 0x74, 0x65, 0x2e, 0x5a, 0x6f, 0x6e, 0x65, 0x53, 0x74, 0x61, 0x74, 0x65, 0x2e, 0x44, + 0x6e, 0x73, 0x53, 0x65, 0x74, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x07, 0x64, 0x6e, 0x73, + 0x53, 0x65, 0x74, 0x73, 0x1a, 0x4a, 0x0a, 0x0c, 0x44, 0x6e, 0x73, 0x53, 0x65, 0x74, 0x73, 0x45, + 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, + 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x24, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, + 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0e, 0x2e, 0x72, 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x2e, 0x44, + 0x4e, 0x53, 0x53, 0x65, 0x74, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, + 0x22, 0x7c, 0x0a, 0x0e, 0x45, 0x78, 0x65, 0x63, 0x75, 0x74, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, + 0x73, 0x74, 0x12, 0x14, 0x0a, 0x05, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, + 0x09, 0x52, 0x05, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x12, 0x16, 0x0a, 0x06, 0x7a, 0x6f, 0x6e, 0x65, + 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x7a, 0x6f, 0x6e, 0x65, 0x69, 0x64, + 0x12, 0x3c, 0x0a, 0x0e, 0x63, 0x68, 0x61, 0x6e, 0x67, 0x65, 0x5f, 0x72, 0x65, 0x71, 0x75, 0x65, + 0x73, 0x74, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x72, 0x65, 0x6d, 0x6f, 0x74, + 0x65, 0x2e, 0x43, 0x68, 0x61, 0x6e, 0x67, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x52, + 0x0d, 0x63, 0x68, 0x61, 0x6e, 
0x67, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x22, 0xaa, + 0x01, 0x0a, 0x0d, 0x43, 0x68, 0x61, 0x6e, 0x67, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, + 0x12, 0x38, 0x0a, 0x06, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0e, + 0x32, 0x20, 0x2e, 0x72, 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x2e, 0x43, 0x68, 0x61, 0x6e, 0x67, 0x65, + 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x2e, 0x41, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x54, 0x79, + 0x70, 0x65, 0x52, 0x06, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x2d, 0x0a, 0x06, 0x63, 0x68, + 0x61, 0x6e, 0x67, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x72, 0x65, 0x6d, + 0x6f, 0x74, 0x65, 0x2e, 0x50, 0x61, 0x72, 0x74, 0x69, 0x61, 0x6c, 0x44, 0x4e, 0x53, 0x53, 0x65, + 0x74, 0x52, 0x06, 0x63, 0x68, 0x61, 0x6e, 0x67, 0x65, 0x22, 0x30, 0x0a, 0x0a, 0x41, 0x63, 0x74, + 0x69, 0x6f, 0x6e, 0x54, 0x79, 0x70, 0x65, 0x12, 0x0a, 0x0a, 0x06, 0x43, 0x52, 0x45, 0x41, 0x54, + 0x45, 0x10, 0x00, 0x12, 0x0a, 0x0a, 0x06, 0x55, 0x50, 0x44, 0x41, 0x54, 0x45, 0x10, 0x01, 0x12, + 0x0a, 0x0a, 0x06, 0x44, 0x45, 0x4c, 0x45, 0x54, 0x45, 0x10, 0x02, 0x22, 0xa3, 0x01, 0x0a, 0x08, + 0x4c, 0x6f, 0x67, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x1c, 0x0a, 0x09, 0x74, 0x69, 0x6d, 0x65, + 0x73, 0x74, 0x61, 0x6d, 0x70, 0x18, 0x01, 0x20, 0x01, 0x28, 0x03, 0x52, 0x09, 0x74, 0x69, 0x6d, + 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x12, 0x2c, 0x0a, 0x05, 0x6c, 0x65, 0x76, 0x65, 0x6c, 0x18, + 0x02, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x16, 0x2e, 0x72, 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x2e, 0x4c, + 0x6f, 0x67, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x2e, 0x4c, 0x65, 0x76, 0x65, 0x6c, 0x52, 0x05, 0x6c, + 0x65, 0x76, 0x65, 0x6c, 0x12, 0x18, 0x0a, 0x07, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x18, + 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x22, 0x31, + 0x0a, 0x05, 0x4c, 0x65, 0x76, 0x65, 0x6c, 0x12, 0x09, 0x0a, 0x05, 0x44, 0x45, 0x42, 0x55, 0x47, + 0x10, 0x00, 0x12, 0x08, 0x0a, 0x04, 0x49, 0x4e, 0x46, 0x4f, 0x10, 0x01, 0x12, 0x08, 0x0a, 0x04, + 0x57, 0x41, 0x52, 0x4e, 0x10, 0x02, 0x12, 0x09, 0x0a, 0x05, 0x45, 0x52, 0x52, 0x4f, 0x52, 0x10, + 0x03, 0x22, 0x85, 0x01, 0x0a, 0x0f, 0x45, 0x78, 0x65, 0x63, 0x75, 0x74, 0x65, 0x52, 0x65, 0x73, + 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x3f, 0x0a, 0x0f, 0x63, 0x68, 0x61, 0x6e, 0x67, 0x65, 0x5f, + 0x72, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x16, 0x2e, 0x72, 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x2e, 0x43, 0x68, 0x61, 0x6e, 0x67, 0x65, 0x52, 0x65, - 0x71, 0x75, 0x65, 0x73, 0x74, 0x52, 0x0d, 0x63, 0x68, 0x61, 0x6e, 0x67, 0x65, 0x52, 0x65, 0x71, - 0x75, 0x65, 0x73, 0x74, 0x22, 0xaa, 0x01, 0x0a, 0x0d, 0x43, 0x68, 0x61, 0x6e, 0x67, 0x65, 0x52, - 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x38, 0x0a, 0x06, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, - 0x18, 0x01, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x20, 0x2e, 0x72, 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x2e, - 0x43, 0x68, 0x61, 0x6e, 0x67, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x2e, 0x41, 0x63, - 0x74, 0x69, 0x6f, 0x6e, 0x54, 0x79, 0x70, 0x65, 0x52, 0x06, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, - 0x12, 0x2d, 0x0a, 0x06, 0x63, 0x68, 0x61, 0x6e, 0x67, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, - 0x32, 0x15, 0x2e, 0x72, 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x2e, 0x50, 0x61, 0x72, 0x74, 0x69, 0x61, - 0x6c, 0x44, 0x4e, 0x53, 0x53, 0x65, 0x74, 0x52, 0x06, 0x63, 0x68, 0x61, 0x6e, 0x67, 0x65, 0x22, - 0x30, 0x0a, 0x0a, 0x41, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x54, 0x79, 0x70, 0x65, 0x12, 0x0a, 0x0a, - 0x06, 0x43, 0x52, 0x45, 0x41, 0x54, 0x45, 0x10, 0x00, 
0x12, 0x0a, 0x0a, 0x06, 0x55, 0x50, 0x44, - 0x41, 0x54, 0x45, 0x10, 0x01, 0x12, 0x0a, 0x0a, 0x06, 0x44, 0x45, 0x4c, 0x45, 0x54, 0x45, 0x10, - 0x02, 0x22, 0xa3, 0x01, 0x0a, 0x08, 0x4c, 0x6f, 0x67, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x1c, - 0x0a, 0x09, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x18, 0x01, 0x20, 0x01, 0x28, - 0x03, 0x52, 0x09, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x12, 0x2c, 0x0a, 0x05, - 0x6c, 0x65, 0x76, 0x65, 0x6c, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x16, 0x2e, 0x72, 0x65, - 0x6d, 0x6f, 0x74, 0x65, 0x2e, 0x4c, 0x6f, 0x67, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x2e, 0x4c, 0x65, - 0x76, 0x65, 0x6c, 0x52, 0x05, 0x6c, 0x65, 0x76, 0x65, 0x6c, 0x12, 0x18, 0x0a, 0x07, 0x6d, 0x65, - 0x73, 0x73, 0x61, 0x67, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x6d, 0x65, 0x73, - 0x73, 0x61, 0x67, 0x65, 0x22, 0x31, 0x0a, 0x05, 0x4c, 0x65, 0x76, 0x65, 0x6c, 0x12, 0x09, 0x0a, - 0x05, 0x44, 0x45, 0x42, 0x55, 0x47, 0x10, 0x00, 0x12, 0x08, 0x0a, 0x04, 0x49, 0x4e, 0x46, 0x4f, - 0x10, 0x01, 0x12, 0x08, 0x0a, 0x04, 0x57, 0x41, 0x52, 0x4e, 0x10, 0x02, 0x12, 0x09, 0x0a, 0x05, - 0x45, 0x52, 0x52, 0x4f, 0x52, 0x10, 0x03, 0x22, 0x85, 0x01, 0x0a, 0x0f, 0x45, 0x78, 0x65, 0x63, - 0x75, 0x74, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x3f, 0x0a, 0x0f, 0x63, - 0x68, 0x61, 0x6e, 0x67, 0x65, 0x5f, 0x72, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x18, 0x01, - 0x20, 0x03, 0x28, 0x0b, 0x32, 0x16, 0x2e, 0x72, 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x2e, 0x43, 0x68, - 0x61, 0x6e, 0x67, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x52, 0x0e, 0x63, 0x68, - 0x61, 0x6e, 0x67, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x31, 0x0a, 0x0b, - 0x6c, 0x6f, 0x67, 0x5f, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x18, 0x02, 0x20, 0x03, 0x28, - 0x0b, 0x32, 0x10, 0x2e, 0x72, 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x2e, 0x4c, 0x6f, 0x67, 0x45, 0x6e, - 0x74, 0x72, 0x79, 0x52, 0x0a, 0x6c, 0x6f, 0x67, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x22, - 0xbc, 0x01, 0x0a, 0x0e, 0x43, 0x68, 0x61, 0x6e, 0x67, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, - 0x73, 0x65, 0x12, 0x32, 0x0a, 0x05, 0x73, 0x74, 0x61, 0x74, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, - 0x0e, 0x32, 0x1c, 0x2e, 0x72, 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x2e, 0x43, 0x68, 0x61, 0x6e, 0x67, - 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x2e, 0x53, 0x74, 0x61, 0x74, 0x65, 0x52, - 0x05, 0x73, 0x74, 0x61, 0x74, 0x65, 0x12, 0x23, 0x0a, 0x0d, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x5f, - 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, 0x65, - 0x72, 0x72, 0x6f, 0x72, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x22, 0x51, 0x0a, 0x05, 0x53, - 0x74, 0x61, 0x74, 0x65, 0x12, 0x11, 0x0a, 0x0d, 0x4e, 0x4f, 0x54, 0x5f, 0x50, 0x52, 0x4f, 0x43, - 0x45, 0x53, 0x53, 0x45, 0x44, 0x10, 0x00, 0x12, 0x0d, 0x0a, 0x09, 0x53, 0x55, 0x43, 0x43, 0x45, - 0x45, 0x44, 0x45, 0x44, 0x10, 0x01, 0x12, 0x0b, 0x0a, 0x07, 0x49, 0x4e, 0x56, 0x41, 0x4c, 0x49, - 0x44, 0x10, 0x02, 0x12, 0x0a, 0x0a, 0x06, 0x46, 0x41, 0x49, 0x4c, 0x45, 0x44, 0x10, 0x03, 0x12, - 0x0d, 0x0a, 0x09, 0x54, 0x48, 0x52, 0x4f, 0x54, 0x54, 0x4c, 0x45, 0x44, 0x10, 0x04, 0x32, 0xfe, - 0x01, 0x0a, 0x0e, 0x52, 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x50, 0x72, 0x6f, 0x76, 0x69, 0x64, 0x65, - 0x72, 0x12, 0x36, 0x0a, 0x05, 0x4c, 0x6f, 0x67, 0x69, 0x6e, 0x12, 0x14, 0x2e, 0x72, 0x65, 0x6d, - 0x6f, 0x74, 0x65, 0x2e, 0x4c, 0x6f, 0x67, 0x69, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, - 0x1a, 0x15, 0x2e, 0x72, 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x2e, 0x4c, 0x6f, 0x67, 
0x69, 0x6e, 0x52, - 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x34, 0x0a, 0x08, 0x47, 0x65, 0x74, - 0x5a, 0x6f, 0x6e, 0x65, 0x73, 0x12, 0x17, 0x2e, 0x72, 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x2e, 0x47, - 0x65, 0x74, 0x5a, 0x6f, 0x6e, 0x65, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x0d, - 0x2e, 0x72, 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x2e, 0x5a, 0x6f, 0x6e, 0x65, 0x73, 0x22, 0x00, 0x12, - 0x40, 0x0a, 0x0c, 0x47, 0x65, 0x74, 0x5a, 0x6f, 0x6e, 0x65, 0x53, 0x74, 0x61, 0x74, 0x65, 0x12, - 0x1b, 0x2e, 0x72, 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x2e, 0x47, 0x65, 0x74, 0x5a, 0x6f, 0x6e, 0x65, - 0x53, 0x74, 0x61, 0x74, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x11, 0x2e, 0x72, - 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x2e, 0x5a, 0x6f, 0x6e, 0x65, 0x53, 0x74, 0x61, 0x74, 0x65, 0x22, - 0x00, 0x12, 0x3c, 0x0a, 0x07, 0x45, 0x78, 0x65, 0x63, 0x75, 0x74, 0x65, 0x12, 0x16, 0x2e, 0x72, - 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x2e, 0x45, 0x78, 0x65, 0x63, 0x75, 0x74, 0x65, 0x52, 0x65, 0x71, - 0x75, 0x65, 0x73, 0x74, 0x1a, 0x17, 0x2e, 0x72, 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x2e, 0x45, 0x78, - 0x65, 0x63, 0x75, 0x74, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x42, - 0x46, 0x5a, 0x44, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x67, 0x61, - 0x72, 0x64, 0x65, 0x6e, 0x65, 0x72, 0x2f, 0x65, 0x78, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x2d, - 0x64, 0x6e, 0x73, 0x2d, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x2f, 0x70, - 0x6b, 0x67, 0x2f, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x2f, 0x72, 0x65, 0x6d, 0x6f, 0x74, 0x65, - 0x2f, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, + 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x52, 0x0e, 0x63, 0x68, 0x61, 0x6e, 0x67, 0x65, 0x52, 0x65, + 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x31, 0x0a, 0x0b, 0x6c, 0x6f, 0x67, 0x5f, 0x6d, 0x65, + 0x73, 0x73, 0x61, 0x67, 0x65, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x10, 0x2e, 0x72, 0x65, + 0x6d, 0x6f, 0x74, 0x65, 0x2e, 0x4c, 0x6f, 0x67, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x0a, 0x6c, + 0x6f, 0x67, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x22, 0xbc, 0x01, 0x0a, 0x0e, 0x43, 0x68, + 0x61, 0x6e, 0x67, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x32, 0x0a, 0x05, + 0x73, 0x74, 0x61, 0x74, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x1c, 0x2e, 0x72, 0x65, + 0x6d, 0x6f, 0x74, 0x65, 0x2e, 0x43, 0x68, 0x61, 0x6e, 0x67, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, + 0x6e, 0x73, 0x65, 0x2e, 0x53, 0x74, 0x61, 0x74, 0x65, 0x52, 0x05, 0x73, 0x74, 0x61, 0x74, 0x65, + 0x12, 0x23, 0x0a, 0x0d, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x5f, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, + 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x4d, 0x65, + 0x73, 0x73, 0x61, 0x67, 0x65, 0x22, 0x51, 0x0a, 0x05, 0x53, 0x74, 0x61, 0x74, 0x65, 0x12, 0x11, + 0x0a, 0x0d, 0x4e, 0x4f, 0x54, 0x5f, 0x50, 0x52, 0x4f, 0x43, 0x45, 0x53, 0x53, 0x45, 0x44, 0x10, + 0x00, 0x12, 0x0d, 0x0a, 0x09, 0x53, 0x55, 0x43, 0x43, 0x45, 0x45, 0x44, 0x45, 0x44, 0x10, 0x01, + 0x12, 0x0b, 0x0a, 0x07, 0x49, 0x4e, 0x56, 0x41, 0x4c, 0x49, 0x44, 0x10, 0x02, 0x12, 0x0a, 0x0a, + 0x06, 0x46, 0x41, 0x49, 0x4c, 0x45, 0x44, 0x10, 0x03, 0x12, 0x0d, 0x0a, 0x09, 0x54, 0x48, 0x52, + 0x4f, 0x54, 0x54, 0x4c, 0x45, 0x44, 0x10, 0x04, 0x32, 0xfe, 0x01, 0x0a, 0x0e, 0x52, 0x65, 0x6d, + 0x6f, 0x74, 0x65, 0x50, 0x72, 0x6f, 0x76, 0x69, 0x64, 0x65, 0x72, 0x12, 0x36, 0x0a, 0x05, 0x4c, + 0x6f, 0x67, 0x69, 0x6e, 0x12, 0x14, 0x2e, 0x72, 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x2e, 0x4c, 0x6f, + 0x67, 0x69, 
0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x15, 0x2e, 0x72, 0x65, 0x6d, + 0x6f, 0x74, 0x65, 0x2e, 0x4c, 0x6f, 0x67, 0x69, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, + 0x65, 0x22, 0x00, 0x12, 0x34, 0x0a, 0x08, 0x47, 0x65, 0x74, 0x5a, 0x6f, 0x6e, 0x65, 0x73, 0x12, + 0x17, 0x2e, 0x72, 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x2e, 0x47, 0x65, 0x74, 0x5a, 0x6f, 0x6e, 0x65, + 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x0d, 0x2e, 0x72, 0x65, 0x6d, 0x6f, 0x74, + 0x65, 0x2e, 0x5a, 0x6f, 0x6e, 0x65, 0x73, 0x22, 0x00, 0x12, 0x40, 0x0a, 0x0c, 0x47, 0x65, 0x74, + 0x5a, 0x6f, 0x6e, 0x65, 0x53, 0x74, 0x61, 0x74, 0x65, 0x12, 0x1b, 0x2e, 0x72, 0x65, 0x6d, 0x6f, + 0x74, 0x65, 0x2e, 0x47, 0x65, 0x74, 0x5a, 0x6f, 0x6e, 0x65, 0x53, 0x74, 0x61, 0x74, 0x65, 0x52, + 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x11, 0x2e, 0x72, 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x2e, + 0x5a, 0x6f, 0x6e, 0x65, 0x53, 0x74, 0x61, 0x74, 0x65, 0x22, 0x00, 0x12, 0x3c, 0x0a, 0x07, 0x45, + 0x78, 0x65, 0x63, 0x75, 0x74, 0x65, 0x12, 0x16, 0x2e, 0x72, 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x2e, + 0x45, 0x78, 0x65, 0x63, 0x75, 0x74, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x17, + 0x2e, 0x72, 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x2e, 0x45, 0x78, 0x65, 0x63, 0x75, 0x74, 0x65, 0x52, + 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x42, 0x46, 0x5a, 0x44, 0x67, 0x69, 0x74, + 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x67, 0x61, 0x72, 0x64, 0x65, 0x6e, 0x65, 0x72, + 0x2f, 0x65, 0x78, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x2d, 0x64, 0x6e, 0x73, 0x2d, 0x6d, 0x61, + 0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x2f, 0x70, 0x6b, 0x67, 0x2f, 0x73, 0x65, 0x72, + 0x76, 0x65, 0x72, 0x2f, 0x72, 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x2f, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, + 0x6e, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, } var ( @@ -1390,60 +1401,61 @@ func file_pkg_server_remote_common_remote_proto_rawDescGZIP() []byte { var file_pkg_server_remote_common_remote_proto_enumTypes = make([]protoimpl.EnumInfo, 3) var file_pkg_server_remote_common_remote_proto_msgTypes = make([]protoimpl.MessageInfo, 20) var file_pkg_server_remote_common_remote_proto_goTypes = []interface{}{ - (ChangeRequest_ActionType)(0), // 0: remote.ChangeRequest.ActionType - (LogEntry_Level)(0), // 1: remote.LogEntry.Level - (ChangeResponse_State)(0), // 2: remote.ChangeResponse.State - (*LoginRequest)(nil), // 3: remote.LoginRequest - (*LoginResponse)(nil), // 4: remote.LoginResponse - (*GetZonesRequest)(nil), // 5: remote.GetZonesRequest - (*Zones)(nil), // 6: remote.Zones - (*Zone)(nil), // 7: remote.Zone - (*GetZoneStateRequest)(nil), // 8: remote.GetZoneStateRequest - (*RecordSet)(nil), // 9: remote.RecordSet - (*DNSSet)(nil), // 10: remote.DNSSet - (*PartialDNSSet)(nil), // 11: remote.PartialDNSSet - (*ZoneState)(nil), // 12: remote.ZoneState - (*ExecuteRequest)(nil), // 13: remote.ExecuteRequest - (*ChangeRequest)(nil), // 14: remote.ChangeRequest - (*LogEntry)(nil), // 15: remote.LogEntry - (*ExecuteResponse)(nil), // 16: remote.ExecuteResponse - (*ChangeResponse)(nil), // 17: remote.ChangeResponse - (*RecordSet_Record)(nil), // 18: remote.RecordSet.Record - (*RecordSet_RoutingPolicy)(nil), // 19: remote.RecordSet.RoutingPolicy - nil, // 20: remote.RecordSet.RoutingPolicy.ParametersEntry - nil, // 21: remote.DNSSet.RecordsEntry - nil, // 22: remote.ZoneState.DnsSetsEntry + (ChangeRequest_ActionType)(0), // 0: remote.ChangeRequest.ActionType + (LogEntry_Level)(0), // 1: remote.LogEntry.Level + (ChangeResponse_State)(0), // 2: remote.ChangeResponse.State + 
(*LoginRequest)(nil), // 3: remote.LoginRequest + (*LoginResponse)(nil), // 4: remote.LoginResponse + (*GetZonesRequest)(nil), // 5: remote.GetZonesRequest + (*Zones)(nil), // 6: remote.Zones + (*Zone)(nil), // 7: remote.Zone + (*GetZoneStateRequest)(nil), // 8: remote.GetZoneStateRequest + (*RecordSet)(nil), // 9: remote.RecordSet + (*RoutingPolicy)(nil), // 10: remote.RoutingPolicy + (*DNSSet)(nil), // 11: remote.DNSSet + (*PartialDNSSet)(nil), // 12: remote.PartialDNSSet + (*ZoneState)(nil), // 13: remote.ZoneState + (*ExecuteRequest)(nil), // 14: remote.ExecuteRequest + (*ChangeRequest)(nil), // 15: remote.ChangeRequest + (*LogEntry)(nil), // 16: remote.LogEntry + (*ExecuteResponse)(nil), // 17: remote.ExecuteResponse + (*ChangeResponse)(nil), // 18: remote.ChangeResponse + (*RecordSet_Record)(nil), // 19: remote.RecordSet.Record + nil, // 20: remote.RoutingPolicy.ParametersEntry + nil, // 21: remote.DNSSet.RecordsEntry + nil, // 22: remote.ZoneState.DnsSetsEntry } var file_pkg_server_remote_common_remote_proto_depIdxs = []int32{ 7, // 0: remote.Zones.zone:type_name -> remote.Zone - 18, // 1: remote.RecordSet.record:type_name -> remote.RecordSet.Record - 19, // 2: remote.RecordSet.routing_policy:type_name -> remote.RecordSet.RoutingPolicy + 19, // 1: remote.RecordSet.record:type_name -> remote.RecordSet.Record + 20, // 2: remote.RoutingPolicy.parameters:type_name -> remote.RoutingPolicy.ParametersEntry 21, // 3: remote.DNSSet.records:type_name -> remote.DNSSet.RecordsEntry - 9, // 4: remote.PartialDNSSet.record_set:type_name -> remote.RecordSet - 22, // 5: remote.ZoneState.dns_sets:type_name -> remote.ZoneState.DnsSetsEntry - 14, // 6: remote.ExecuteRequest.change_request:type_name -> remote.ChangeRequest - 0, // 7: remote.ChangeRequest.action:type_name -> remote.ChangeRequest.ActionType - 11, // 8: remote.ChangeRequest.change:type_name -> remote.PartialDNSSet - 1, // 9: remote.LogEntry.level:type_name -> remote.LogEntry.Level - 17, // 10: remote.ExecuteResponse.change_response:type_name -> remote.ChangeResponse - 15, // 11: remote.ExecuteResponse.log_message:type_name -> remote.LogEntry - 2, // 12: remote.ChangeResponse.state:type_name -> remote.ChangeResponse.State - 20, // 13: remote.RecordSet.RoutingPolicy.parameters:type_name -> remote.RecordSet.RoutingPolicy.ParametersEntry - 9, // 14: remote.DNSSet.RecordsEntry.value:type_name -> remote.RecordSet - 10, // 15: remote.ZoneState.DnsSetsEntry.value:type_name -> remote.DNSSet - 3, // 16: remote.RemoteProvider.Login:input_type -> remote.LoginRequest - 5, // 17: remote.RemoteProvider.GetZones:input_type -> remote.GetZonesRequest - 8, // 18: remote.RemoteProvider.GetZoneState:input_type -> remote.GetZoneStateRequest - 13, // 19: remote.RemoteProvider.Execute:input_type -> remote.ExecuteRequest - 4, // 20: remote.RemoteProvider.Login:output_type -> remote.LoginResponse - 6, // 21: remote.RemoteProvider.GetZones:output_type -> remote.Zones - 12, // 22: remote.RemoteProvider.GetZoneState:output_type -> remote.ZoneState - 16, // 23: remote.RemoteProvider.Execute:output_type -> remote.ExecuteResponse - 20, // [20:24] is the sub-list for method output_type - 16, // [16:20] is the sub-list for method input_type - 16, // [16:16] is the sub-list for extension type_name - 16, // [16:16] is the sub-list for extension extendee - 0, // [0:16] is the sub-list for field type_name + 10, // 4: remote.DNSSet.routing_policy:type_name -> remote.RoutingPolicy + 9, // 5: remote.PartialDNSSet.record_set:type_name -> remote.RecordSet + 10, // 6: 
remote.PartialDNSSet.routing_policy:type_name -> remote.RoutingPolicy + 22, // 7: remote.ZoneState.dns_sets:type_name -> remote.ZoneState.DnsSetsEntry + 15, // 8: remote.ExecuteRequest.change_request:type_name -> remote.ChangeRequest + 0, // 9: remote.ChangeRequest.action:type_name -> remote.ChangeRequest.ActionType + 12, // 10: remote.ChangeRequest.change:type_name -> remote.PartialDNSSet + 1, // 11: remote.LogEntry.level:type_name -> remote.LogEntry.Level + 18, // 12: remote.ExecuteResponse.change_response:type_name -> remote.ChangeResponse + 16, // 13: remote.ExecuteResponse.log_message:type_name -> remote.LogEntry + 2, // 14: remote.ChangeResponse.state:type_name -> remote.ChangeResponse.State + 9, // 15: remote.DNSSet.RecordsEntry.value:type_name -> remote.RecordSet + 11, // 16: remote.ZoneState.DnsSetsEntry.value:type_name -> remote.DNSSet + 3, // 17: remote.RemoteProvider.Login:input_type -> remote.LoginRequest + 5, // 18: remote.RemoteProvider.GetZones:input_type -> remote.GetZonesRequest + 8, // 19: remote.RemoteProvider.GetZoneState:input_type -> remote.GetZoneStateRequest + 14, // 20: remote.RemoteProvider.Execute:input_type -> remote.ExecuteRequest + 4, // 21: remote.RemoteProvider.Login:output_type -> remote.LoginResponse + 6, // 22: remote.RemoteProvider.GetZones:output_type -> remote.Zones + 13, // 23: remote.RemoteProvider.GetZoneState:output_type -> remote.ZoneState + 17, // 24: remote.RemoteProvider.Execute:output_type -> remote.ExecuteResponse + 21, // [21:25] is the sub-list for method output_type + 17, // [17:21] is the sub-list for method input_type + 17, // [17:17] is the sub-list for extension type_name + 17, // [17:17] is the sub-list for extension extendee + 0, // [0:17] is the sub-list for field type_name } func init() { file_pkg_server_remote_common_remote_proto_init() } @@ -1537,7 +1549,7 @@ func file_pkg_server_remote_common_remote_proto_init() { } } file_pkg_server_remote_common_remote_proto_msgTypes[7].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*DNSSet); i { + switch v := v.(*RoutingPolicy); i { case 0: return &v.state case 1: @@ -1549,7 +1561,7 @@ func file_pkg_server_remote_common_remote_proto_init() { } } file_pkg_server_remote_common_remote_proto_msgTypes[8].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*PartialDNSSet); i { + switch v := v.(*DNSSet); i { case 0: return &v.state case 1: @@ -1561,7 +1573,7 @@ func file_pkg_server_remote_common_remote_proto_init() { } } file_pkg_server_remote_common_remote_proto_msgTypes[9].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*ZoneState); i { + switch v := v.(*PartialDNSSet); i { case 0: return &v.state case 1: @@ -1573,7 +1585,7 @@ func file_pkg_server_remote_common_remote_proto_init() { } } file_pkg_server_remote_common_remote_proto_msgTypes[10].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*ExecuteRequest); i { + switch v := v.(*ZoneState); i { case 0: return &v.state case 1: @@ -1585,7 +1597,7 @@ func file_pkg_server_remote_common_remote_proto_init() { } } file_pkg_server_remote_common_remote_proto_msgTypes[11].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*ChangeRequest); i { + switch v := v.(*ExecuteRequest); i { case 0: return &v.state case 1: @@ -1597,7 +1609,7 @@ func file_pkg_server_remote_common_remote_proto_init() { } } file_pkg_server_remote_common_remote_proto_msgTypes[12].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*LogEntry); i { + switch v := 
v.(*ChangeRequest); i {
 		case 0:
 			return &v.state
 		case 1:
@@ -1609,7 +1621,7 @@ func file_pkg_server_remote_common_remote_proto_init() {
 		}
 	}
 	file_pkg_server_remote_common_remote_proto_msgTypes[13].Exporter = func(v interface{}, i int) interface{} {
-		switch v := v.(*ExecuteResponse); i {
+		switch v := v.(*LogEntry); i {
 		case 0:
 			return &v.state
 		case 1:
@@ -1621,7 +1633,7 @@ func file_pkg_server_remote_common_remote_proto_init() {
 		}
 	}
 	file_pkg_server_remote_common_remote_proto_msgTypes[14].Exporter = func(v interface{}, i int) interface{} {
-		switch v := v.(*ChangeResponse); i {
+		switch v := v.(*ExecuteResponse); i {
 		case 0:
 			return &v.state
 		case 1:
@@ -1633,7 +1645,7 @@ func file_pkg_server_remote_common_remote_proto_init() {
 		}
 	}
 	file_pkg_server_remote_common_remote_proto_msgTypes[15].Exporter = func(v interface{}, i int) interface{} {
-		switch v := v.(*RecordSet_Record); i {
+		switch v := v.(*ChangeResponse); i {
 		case 0:
 			return &v.state
 		case 1:
@@ -1645,7 +1657,7 @@ func file_pkg_server_remote_common_remote_proto_init() {
 		}
 	}
 	file_pkg_server_remote_common_remote_proto_msgTypes[16].Exporter = func(v interface{}, i int) interface{} {
-		switch v := v.(*RecordSet_RoutingPolicy); i {
+		switch v := v.(*RecordSet_Record); i {
 		case 0:
 			return &v.state
 		case 1:
diff --git a/pkg/server/remote/common/remote.proto b/pkg/server/remote/common/remote.proto
index 10cfa3492..ce2bdc68e 100644
--- a/pkg/server/remote/common/remote.proto
+++ b/pkg/server/remote/common/remote.proto
@@ -53,15 +53,14 @@ message RecordSet {
     string value = 1;
   }
 
-  message RoutingPolicy {
-    string type = 1;
-    map<string, string> parameters = 2;
-  }
-
   string type = 1;
   int32 ttl = 2;
   repeated Record record = 3;
-  RoutingPolicy routing_policy = 4;
+}
+
+message RoutingPolicy {
+  string type = 1;
+  map<string, string> parameters = 2;
 }
 
 message DNSSet {
@@ -69,6 +68,7 @@ message DNSSet {
   string update_group = 2;
   map<string, RecordSet> records = 3;
   string set_identifier = 4;
+  RoutingPolicy routing_policy = 5;
 }
 
 message PartialDNSSet {
@@ -77,6 +77,7 @@ message PartialDNSSet {
   string record_type = 3;
   RecordSet record_set = 4;
   string set_identifier = 5;
+  RoutingPolicy routing_policy = 6;
 }
 
 message ZoneState {
diff --git a/pkg/server/remote/conversion/conversion.go b/pkg/server/remote/conversion/conversion.go
index a7f598947..33606e568 100644
--- a/pkg/server/remote/conversion/conversion.go
+++ b/pkg/server/remote/conversion/conversion.go
@@ -58,6 +58,7 @@ func MarshalDNSSet(local *dns.DNSSet) *common.DNSSet {
 		SetIdentifier: local.Name.SetIdentifier,
 		UpdateGroup:   local.UpdateGroup,
 		Records:       map[string]*common.RecordSet{},
+		RoutingPolicy: MarshalRoutingPolicy(local.RoutingPolicy),
 	}
 	for typ, rs := range local.Sets {
 		remote.Records[typ] = MarshalRecordSet(rs)
@@ -67,9 +68,8 @@ func MarshalDNSSet(local *dns.DNSSet) *common.DNSSet {
 
 func MarshalRecordSet(local *dns.RecordSet) *common.RecordSet {
 	remote := &common.RecordSet{
-		Type:          local.Type,
-		Ttl:           int32(local.TTL),
-		RoutingPolicy: MarshalRoutingPolicy(local.RoutingPolicy),
+		Type: local.Type,
+		Ttl:  int32(local.TTL),
 	}
 	for _, v := range local.Records {
 		remote.Record = append(remote.Record, &common.RecordSet_Record{Value: v.Value})
@@ -77,7 +77,7 @@ func MarshalRecordSet(local *dns.RecordSet) *common.RecordSet {
 	return remote
 }
 
-func MarshalRoutingPolicy(local *dns.RoutingPolicy) *common.RecordSet_RoutingPolicy {
+func MarshalRoutingPolicy(local *dns.RoutingPolicy) *common.RoutingPolicy {
 	if local == nil {
 		return nil
 	}
@@ -85,7 +85,7 @@ func MarshalRoutingPolicy(local *dns.RoutingPol
 	for k, v := range 
local.Parameters { params[k] = v } - return &common.RecordSet_RoutingPolicy{ + return &common.RoutingPolicy{ Type: local.Type, Parameters: params, } @@ -98,6 +98,7 @@ func MarshalPartialDNSSet(local *dns.DNSSet, recordType string) *common.PartialD UpdateGroup: local.UpdateGroup, RecordType: recordType, RecordSet: MarshalRecordSet(local.Sets[recordType]), + RoutingPolicy: MarshalRoutingPolicy(local.RoutingPolicy), } } @@ -110,7 +111,8 @@ func UnmarshalDNSSets(remote common.DNSSets) dns.DNSSets { } func UnmarshalDNSSet(remote *common.DNSSet) *dns.DNSSet { - local := dns.NewDNSSet(dns.DNSSetName{DNSName: remote.DnsName, SetIdentifier: remote.SetIdentifier}) + policy := UnmarshalRoutingPolicy(remote.RoutingPolicy) + local := dns.NewDNSSet(dns.DNSSetName{DNSName: remote.DnsName, SetIdentifier: remote.SetIdentifier}, policy) local.UpdateGroup = remote.UpdateGroup for typ, rs := range remote.Records { @@ -121,14 +123,13 @@ func UnmarshalDNSSet(remote *common.DNSSet) *dns.DNSSet { func UnmarshalRecordSet(rs *common.RecordSet) *dns.RecordSet { local := dns.NewRecordSet(rs.Type, int64(rs.Ttl), nil) - local.RoutingPolicy = UnmarshalRoutingPolicy(rs.RoutingPolicy) for _, v := range rs.Record { local.Add(&dns.Record{Value: v.Value}) } return local } -func UnmarshalRoutingPolicy(policy *common.RecordSet_RoutingPolicy) *dns.RoutingPolicy { +func UnmarshalRoutingPolicy(policy *common.RoutingPolicy) *dns.RoutingPolicy { if policy == nil { return nil } @@ -143,7 +144,8 @@ func UnmarshalRoutingPolicy(policy *common.RecordSet_RoutingPolicy) *dns.Routing } func UnmarshalPartialDNSSet(remote *common.PartialDNSSet) *dns.DNSSet { - local := dns.NewDNSSet(dns.DNSSetName{DNSName: remote.DnsName, SetIdentifier: remote.SetIdentifier}) + policy := UnmarshalRoutingPolicy(remote.RoutingPolicy) + local := dns.NewDNSSet(dns.DNSSetName{DNSName: remote.DnsName, SetIdentifier: remote.SetIdentifier}, policy) local.UpdateGroup = remote.UpdateGroup local.Sets[remote.RecordType] = UnmarshalRecordSet(remote.RecordSet) diff --git a/pkg/server/remote/conversion/conversion_test.go b/pkg/server/remote/conversion/conversion_test.go index 4d5b3378c..a275a2edb 100644 --- a/pkg/server/remote/conversion/conversion_test.go +++ b/pkg/server/remote/conversion/conversion_test.go @@ -29,18 +29,18 @@ func TestMarshalDNSSets(t *testing.T) { sets1 := dns.DNSSets{} rsb := dns.NewRecordSet(dns.RS_A, 100, []*dns.Record{{Value: "1.1.1.1"}, {Value: "1.1.1.2"}}) rsc1 := dns.NewRecordSet(dns.RS_TXT, 200, []*dns.Record{{Value: "foo"}, {Value: "bar"}}) - rsc1.RoutingPolicy = &dns.RoutingPolicy{ + routingPolicy1 := &dns.RoutingPolicy{ Type: "weighted", Parameters: map[string]string{"weight": "1"}, } rsc2 := dns.NewRecordSet(dns.RS_TXT, 200, []*dns.Record{{Value: "foo"}, {Value: "bla"}}) - rsc2.RoutingPolicy = &dns.RoutingPolicy{ + routingPolicy2 := &dns.RoutingPolicy{ Type: "weighted", Parameters: map[string]string{"weight": "2"}, } - sets1.AddRecordSet(dns.DNSSetName{DNSName: "b.a"}, rsb) - sets1.AddRecordSet(dns.DNSSetName{DNSName: "c.a", SetIdentifier: "id1"}, rsc1) - sets1.AddRecordSet(dns.DNSSetName{DNSName: "c.a", SetIdentifier: "id2"}, rsc2) + sets1.AddRecordSet(dns.DNSSetName{DNSName: "b.a"}, nil, rsb) + sets1.AddRecordSet(dns.DNSSetName{DNSName: "c.a", SetIdentifier: "id1"}, routingPolicy1, rsc1) + sets1.AddRecordSet(dns.DNSSetName{DNSName: "c.a", SetIdentifier: "id2"}, routingPolicy2, rsc2) table := []struct { name string sets dns.DNSSets @@ -73,7 +73,7 @@ func TestMarshalChangeRequest(t *testing.T) { } func 
TestMarshalChangeRequestWithRoutingPolicy(t *testing.T) { - doTestMarshalChangeRequest(t, false) + doTestMarshalChangeRequest(t, true) } func doTestMarshalChangeRequest(t *testing.T, withPolicy bool) { @@ -86,18 +86,18 @@ func doTestMarshalChangeRequest(t *testing.T, withPolicy bool) { Parameters: map[string]string{"weight": "100"}, } } - set := dns.NewDNSSet(dns.DNSSetName{DNSName: "b.a", SetIdentifier: setIdentifier}) + set := dns.NewDNSSet(dns.DNSSetName{DNSName: "b.a", SetIdentifier: setIdentifier}, routingPolicy) set.UpdateGroup = "group1" - set.SetMetaAttr(dns.ATTR_OWNER, "owner1", routingPolicy) - set.SetMetaAttr(dns.ATTR_PREFIX, "comment-", routingPolicy) - set.SetRecordSet(dns.RS_A, 100, routingPolicy, "1.1.1.1", "1.1.1.2") + set.SetMetaAttr(dns.ATTR_OWNER, "owner1") + set.SetMetaAttr(dns.ATTR_PREFIX, "comment-") + set.SetRecordSet(dns.RS_A, 100, "1.1.1.1", "1.1.1.2") table := []struct { name string request *provider.ChangeRequest }{ - {"create", provider.NewChangeRequest(provider.R_CREATE, dns.RS_A, nil, set, nil, routingPolicy)}, - {"update", provider.NewChangeRequest(provider.R_UPDATE, dns.RS_META, nil, set, nil, routingPolicy)}, - {"delete", provider.NewChangeRequest(provider.R_DELETE, dns.RS_A, set, nil, nil, routingPolicy)}, + {"create", provider.NewChangeRequest(provider.R_CREATE, dns.RS_A, nil, set, nil)}, + {"update", provider.NewChangeRequest(provider.R_UPDATE, dns.RS_META, nil, set, nil)}, + {"delete", provider.NewChangeRequest(provider.R_DELETE, dns.RS_A, set, nil, nil)}, } for _, item := range table { @@ -121,7 +121,7 @@ func doTestMarshalChangeRequest(t *testing.T, withPolicy bool) { del = item.request.Deletion.Clone() del.Sets = map[string]*dns.RecordSet{item.request.Type: del.Sets[item.request.Type]} } - expected := provider.NewChangeRequest(item.request.Action, item.request.Type, del, add, item.request.Done, routingPolicy) + expected := provider.NewChangeRequest(item.request.Action, item.request.Type, del, add, item.request.Done) expected.Done = nil if !reflect.DeepEqual(expected, copy) { t.Errorf("change request mismatch: %s", item.name) From f9cde15567b8fe9020ef1756706d04a7eae3d607 Mon Sep 17 00:00:00 2001 From: Martin Weindel Date: Thu, 28 Jul 2022 14:55:41 +0200 Subject: [PATCH 7/7] weighted routing policy for google-clouddns --- docs/aws-route53/README.md | 2 +- docs/google-cloud-dns/README.md | 51 + examples/41-entry-weighted.yaml | 4 +- examples/51-ingress-weighted.yaml | 2 +- examples/51-service-weighted.yaml | 2 +- go.mod | 26 +- go.sum | 109 +- pkg/controller/provider/google/execution.go | 147 +- .../provider/google/execution_test.go | 447 ++ .../provider/google/google_suite_test.go | 30 + pkg/controller/provider/google/handler.go | 39 +- .../provider/google/routingpolicy.go | 225 + pkg/dns/dnsset.go | 6 + test/functional/routingpolicies.go | 2 +- .../cloud.google.com/go/{ => compute}/LICENSE | 0 .../go/compute/metadata/metadata.go | 27 +- .../golang/protobuf/jsonpb/decode.go | 524 ++ .../golang/protobuf/jsonpb/encode.go | 559 ++ .../github.com/golang/protobuf/jsonpb/json.go | 69 + .../github.com/google/go-cmp/cmp/compare.go | 19 +- .../google/go-cmp/cmp/export_panic.go | 1 + .../google/go-cmp/cmp/export_unsafe.go | 1 + .../go-cmp/cmp/internal/diff/debug_disable.go | 1 + .../go-cmp/cmp/internal/diff/debug_enable.go | 1 + .../cmp/internal/flags/toolchain_legacy.go | 10 - .../cmp/internal/flags/toolchain_recent.go | 10 - .../google/go-cmp/cmp/internal/value/name.go | 7 + .../cmp/internal/value/pointer_purego.go | 1 + 
.../cmp/internal/value/pointer_unsafe.go | 1 + vendor/github.com/google/go-cmp/cmp/path.go | 2 +- .../google/go-cmp/cmp/report_compare.go | 5 +- .../google/go-cmp/cmp/report_reflect.go | 13 +- .../google/go-cmp/cmp/report_slices.go | 6 +- vendor/github.com/google/uuid/.travis.yml | 9 + vendor/github.com/google/uuid/CONTRIBUTING.md | 10 + vendor/github.com/google/uuid/CONTRIBUTORS | 9 + vendor/github.com/google/uuid/LICENSE | 27 + vendor/github.com/google/uuid/README.md | 19 + vendor/github.com/google/uuid/dce.go | 80 + vendor/github.com/google/uuid/doc.go | 12 + vendor/github.com/google/uuid/hash.go | 53 + vendor/github.com/google/uuid/marshal.go | 38 + vendor/github.com/google/uuid/node.go | 90 + vendor/github.com/google/uuid/node_js.go | 12 + vendor/github.com/google/uuid/node_net.go | 33 + vendor/github.com/google/uuid/null.go | 118 + vendor/github.com/google/uuid/sql.go | 59 + vendor/github.com/google/uuid/time.go | 123 + vendor/github.com/google/uuid/util.go | 43 + vendor/github.com/google/uuid/uuid.go | 294 + vendor/github.com/google/uuid/version1.go | 44 + vendor/github.com/google/uuid/version4.go | 76 + .../enterprise-certificate-proxy/LICENSE | 202 + .../client/client.go | 151 + .../client/util/util.go | 72 + .../gax-go/v2/.release-please-manifest.json | 3 + .../googleapis/gax-go/v2/CHANGES.md | 18 + .../googleapis/gax-go/v2/apierror/apierror.go | 6 +- .../v2/apierror/internal/proto/error.pb.go | 110 +- .../googleapis/gax-go/v2/call_option.go | 55 + vendor/github.com/googleapis/gax-go/v2/gax.go | 4 +- .../googleapis/gax-go/v2/internal/version.go | 33 + .../googleapis/gax-go/v2/proto_json_stream.go | 126 + .../gax-go/v2/release-please-config.json | 10 + vendor/golang.org/x/net/bpf/doc.go | 6 +- vendor/golang.org/x/net/context/context.go | 6 +- vendor/golang.org/x/net/context/go17.go | 10 +- vendor/golang.org/x/net/context/pre_go17.go | 10 +- .../golang.org/x/net/http/httpguts/httplex.go | 54 +- .../x/net/http2/client_conn_pool.go | 3 +- vendor/golang.org/x/net/http2/errors.go | 2 +- vendor/golang.org/x/net/http2/frame.go | 3 +- .../golang.org/x/net/http2/hpack/huffman.go | 87 +- vendor/golang.org/x/net/http2/http2.go | 14 +- vendor/golang.org/x/net/http2/server.go | 135 +- vendor/golang.org/x/net/http2/transport.go | 18 +- .../x/net/http2/writesched_priority.go | 9 +- vendor/golang.org/x/net/idna/trieval.go | 34 +- .../x/net/internal/socket/zsys_linux_ppc.go | 32 +- vendor/golang.org/x/net/ipv4/doc.go | 12 +- vendor/golang.org/x/net/ipv6/doc.go | 12 +- vendor/golang.org/x/net/publicsuffix/list.go | 7 +- vendor/golang.org/x/net/publicsuffix/table.go | 2 + .../x/oauth2/authhandler/authhandler.go | 44 +- vendor/golang.org/x/oauth2/google/default.go | 38 +- vendor/golang.org/x/oauth2/google/doc.go | 13 +- vendor/golang.org/x/oauth2/google/error.go | 64 + vendor/golang.org/x/oauth2/google/google.go | 1 + .../google/internal/externalaccount/aws.go | 76 +- .../externalaccount/basecredentials.go | 11 +- vendor/golang.org/x/oauth2/google/jwt.go | 3 +- vendor/golang.org/x/sys/execabs/execabs.go | 2 +- .../golang.org/x/sys/execabs/execabs_go118.go | 12 + .../golang.org/x/sys/execabs/execabs_go119.go | 15 + vendor/golang.org/x/sys/plan9/syscall.go | 1 + .../golang.org/x/sys/plan9/syscall_plan9.go | 10 + .../golang.org/x/sys/unix/asm_linux_loong64.s | 4 +- vendor/golang.org/x/sys/unix/endian_little.go | 4 +- vendor/golang.org/x/sys/unix/ifreq_linux.go | 9 +- vendor/golang.org/x/sys/unix/syscall_aix.go | 10 +- vendor/golang.org/x/sys/unix/syscall_bsd.go | 46 +- 
.../golang.org/x/sys/unix/syscall_darwin.go | 9 +- .../x/sys/unix/syscall_dragonfly.go | 2 + .../golang.org/x/sys/unix/syscall_illumos.go | 5 +- vendor/golang.org/x/sys/unix/syscall_linux.go | 157 +- .../x/sys/unix/syscall_linux_loong64.go | 226 + .../x/sys/unix/syscall_linux_riscv64.go | 1 + .../golang.org/x/sys/unix/syscall_openbsd.go | 2 + .../golang.org/x/sys/unix/syscall_solaris.go | 51 +- vendor/golang.org/x/sys/unix/syscall_unix.go | 74 +- vendor/golang.org/x/sys/unix/zerrors_linux.go | 40 +- .../x/sys/unix/zerrors_linux_386.go | 4 +- .../x/sys/unix/zerrors_linux_amd64.go | 4 +- .../x/sys/unix/zerrors_linux_arm.go | 4 +- .../x/sys/unix/zerrors_linux_arm64.go | 5 +- .../x/sys/unix/zerrors_linux_loong64.go | 818 ++ .../x/sys/unix/zerrors_linux_mips.go | 4 +- .../x/sys/unix/zerrors_linux_mips64.go | 4 +- .../x/sys/unix/zerrors_linux_mips64le.go | 4 +- .../x/sys/unix/zerrors_linux_mipsle.go | 4 +- .../x/sys/unix/zerrors_linux_ppc.go | 4 +- .../x/sys/unix/zerrors_linux_ppc64.go | 4 +- .../x/sys/unix/zerrors_linux_ppc64le.go | 4 +- .../x/sys/unix/zerrors_linux_riscv64.go | 4 +- .../x/sys/unix/zerrors_linux_s390x.go | 4 +- .../x/sys/unix/zerrors_linux_sparc64.go | 4 +- .../x/sys/unix/zsyscall_darwin_amd64.go | 24 + .../x/sys/unix/zsyscall_darwin_amd64.s | 6 + .../x/sys/unix/zsyscall_darwin_arm64.go | 24 + .../x/sys/unix/zsyscall_darwin_arm64.s | 6 + .../x/sys/unix/zsyscall_linux_loong64.go | 527 ++ .../x/sys/unix/zsyscall_linux_riscv64.go | 11 + .../x/sys/unix/zsyscall_solaris_amd64.go | 14 + .../x/sys/unix/zsysnum_linux_loong64.go | 311 + .../x/sys/unix/zsysnum_linux_riscv64.go | 1 + .../x/sys/unix/ztypes_darwin_amd64.go | 73 +- .../x/sys/unix/ztypes_darwin_arm64.go | 73 +- vendor/golang.org/x/sys/unix/ztypes_linux.go | 22 +- .../golang.org/x/sys/unix/ztypes_linux_386.go | 9 +- .../x/sys/unix/ztypes_linux_amd64.go | 8 +- .../golang.org/x/sys/unix/ztypes_linux_arm.go | 9 +- .../x/sys/unix/ztypes_linux_arm64.go | 8 +- .../x/sys/unix/ztypes_linux_loong64.go | 685 ++ .../x/sys/unix/ztypes_linux_mips.go | 9 +- .../x/sys/unix/ztypes_linux_mips64.go | 8 +- .../x/sys/unix/ztypes_linux_mips64le.go | 8 +- .../x/sys/unix/ztypes_linux_mipsle.go | 9 +- .../golang.org/x/sys/unix/ztypes_linux_ppc.go | 9 +- .../x/sys/unix/ztypes_linux_ppc64.go | 8 +- .../x/sys/unix/ztypes_linux_ppc64le.go | 8 +- .../x/sys/unix/ztypes_linux_riscv64.go | 8 +- .../x/sys/unix/ztypes_linux_s390x.go | 8 +- .../x/sys/unix/ztypes_linux_sparc64.go | 8 +- .../x/sys/unix/ztypes_openbsd_386.go | 8 +- .../x/sys/unix/ztypes_openbsd_amd64.go | 8 +- .../x/sys/unix/ztypes_openbsd_arm.go | 8 +- .../x/sys/unix/ztypes_openbsd_arm64.go | 8 +- .../x/sys/unix/ztypes_openbsd_mips64.go | 8 +- .../x/sys/unix/ztypes_solaris_amd64.go | 2 +- .../golang.org/x/sys/windows/exec_windows.go | 10 +- .../x/sys/windows/syscall_windows.go | 1 - vendor/golang.org/x/xerrors/doc.go | 3 +- vendor/golang.org/x/xerrors/fmt.go | 3 + vendor/golang.org/x/xerrors/wrap.go | 6 + .../google.golang.org/api/dns/v1/dns-api.json | 1401 +++- .../google.golang.org/api/dns/v1/dns-gen.go | 7104 +++++++++++++---- .../api/googleapi/googleapi.go | 24 +- .../api/internal/gensupport/media.go | 16 +- .../api/internal/gensupport/resumable.go | 52 +- .../api/internal/gensupport/retry.go | 21 +- .../api/internal/gensupport/send.go | 21 + .../google.golang.org/api/internal/version.go | 8 + .../api/option/credentials_go19.go | 24 - .../api/option/credentials_notgo19.go | 23 - vendor/google.golang.org/api/option/option.go | 14 +- .../api/transport/cert/default_cert.go | 123 +- 
.../api/transport/cert/enterprise_cert.go | 56 + .../api/transport/cert/secureconnect_cert.go | 123 + .../transport/http/default_transport_go113.go | 21 - .../http/default_transport_not_go113.go | 16 - .../api/transport/http/dial.go | 11 + vendor/google.golang.org/grpc/CONTRIBUTING.md | 7 +- .../grpc/attributes/attributes.go | 80 +- .../grpc/balancer/balancer.go | 32 +- .../grpc/balancer/base/balancer.go | 65 +- .../grpc/balancer/grpclb/state/state.go | 2 +- .../grpc/balancer_conn_wrappers.go | 318 +- .../grpc/channelz/channelz.go | 36 + vendor/google.golang.org/grpc/clientconn.go | 574 +- .../grpc/credentials/credentials.go | 25 +- .../grpc/credentials/insecure/insecure.go | 98 + vendor/google.golang.org/grpc/dialoptions.go | 101 +- .../grpc/encoding/encoding.go | 2 +- .../grpc/grpclog/loggerv2.go | 94 +- vendor/google.golang.org/grpc/interceptor.go | 9 +- .../balancer/gracefulswitch/gracefulswitch.go | 382 + .../grpc/internal/binarylog/binarylog.go | 91 +- .../grpc/internal/binarylog/env_config.go | 6 +- .../grpc/internal/binarylog/method_logger.go | 26 +- .../grpc/internal/channelz/funcs.go | 238 +- .../grpc/internal/channelz/id.go | 75 + .../grpc/internal/channelz/logging.go | 91 +- .../grpc/internal/channelz/types.go | 23 +- .../grpc/internal/envconfig/envconfig.go | 5 - .../grpc/internal/envconfig/xds.go | 101 + .../grpc/internal/grpclog/grpclog.go | 8 +- .../grpc/internal/grpcutil/grpcutil.go | 20 + .../grpc/internal/grpcutil/regex.go | 31 + .../grpc/internal/grpcutil/target.go | 89 - .../grpc/internal/internal.go | 13 +- .../grpc/internal/metadata/metadata.go | 76 +- .../grpc/internal/pretty/pretty.go | 82 + .../grpc/internal/resolver/config_selector.go | 2 +- .../grpc/internal/resolver/unix/unix.go | 12 +- .../grpc/internal/transport/controlbuf.go | 14 +- .../grpc/internal/transport/flowcontrol.go | 4 +- .../grpc/internal/transport/http2_client.go | 107 +- .../grpc/internal/transport/http2_server.go | 180 +- .../transport/networktype/networktype.go | 2 +- .../grpc/internal/transport/proxy.go | 4 +- .../grpc/internal/transport/transport.go | 11 +- .../grpc/internal/xds/env/env.go | 95 - .../grpc/internal/xds_handshake_cluster.go | 2 +- .../grpc/metadata/metadata.go | 8 +- .../google.golang.org/grpc/picker_wrapper.go | 10 +- vendor/google.golang.org/grpc/pickfirst.go | 126 +- vendor/google.golang.org/grpc/regenerate.sh | 34 +- vendor/google.golang.org/grpc/resolver/map.go | 109 + .../grpc/resolver/resolver.go | 62 +- .../grpc/resolver_conn_wrapper.go | 23 +- vendor/google.golang.org/grpc/rpc_util.go | 12 +- vendor/google.golang.org/grpc/server.go | 140 +- .../google.golang.org/grpc/service_config.go | 5 +- .../google.golang.org/grpc/status/status.go | 34 +- vendor/google.golang.org/grpc/stream.go | 235 +- vendor/google.golang.org/grpc/version.go | 2 +- vendor/google.golang.org/grpc/vet.sh | 2 +- .../protobuf/encoding/protowire/wire.go | 19 +- .../protobuf/internal/encoding/text/decode.go | 2 +- .../protobuf/internal/errors/is_go112.go | 1 + .../protobuf/internal/errors/is_go113.go | 1 + .../internal/flags/proto_legacy_disable.go | 1 + .../internal/flags/proto_legacy_enable.go | 1 + .../protobuf/internal/impl/codec_map_go111.go | 1 + .../protobuf/internal/impl/codec_map_go112.go | 1 + .../protobuf/internal/impl/codec_reflect.go | 1 + .../protobuf/internal/impl/codec_unsafe.go | 1 + .../protobuf/internal/impl/decode.go | 8 + .../protobuf/internal/impl/pointer_reflect.go | 1 + .../protobuf/internal/impl/pointer_unsafe.go | 1 + .../protobuf/internal/strs/strings_pure.go | 1 + 
.../protobuf/internal/strs/strings_unsafe.go | 1 + .../protobuf/internal/version/version.go | 4 +- .../protobuf/proto/decode.go | 17 +- .../protobuf/proto/proto_methods.go | 1 + .../protobuf/proto/proto_reflect.go | 1 + .../protobuf/reflect/protoreflect/methods.go | 1 + .../reflect/protoreflect/value_pure.go | 1 + .../reflect/protoreflect/value_union.go | 25 + .../reflect/protoreflect/value_unsafe.go | 1 + .../protobuf/runtime/protoiface/methods.go | 1 + vendor/modules.txt | 54 +- 262 files changed, 17641 insertions(+), 4107 deletions(-) create mode 100644 pkg/controller/provider/google/execution_test.go create mode 100644 pkg/controller/provider/google/google_suite_test.go create mode 100644 pkg/controller/provider/google/routingpolicy.go rename vendor/cloud.google.com/go/{ => compute}/LICENSE (100%) create mode 100644 vendor/github.com/golang/protobuf/jsonpb/decode.go create mode 100644 vendor/github.com/golang/protobuf/jsonpb/encode.go create mode 100644 vendor/github.com/golang/protobuf/jsonpb/json.go delete mode 100644 vendor/github.com/google/go-cmp/cmp/internal/flags/toolchain_legacy.go delete mode 100644 vendor/github.com/google/go-cmp/cmp/internal/flags/toolchain_recent.go create mode 100644 vendor/github.com/google/uuid/.travis.yml create mode 100644 vendor/github.com/google/uuid/CONTRIBUTING.md create mode 100644 vendor/github.com/google/uuid/CONTRIBUTORS create mode 100644 vendor/github.com/google/uuid/LICENSE create mode 100644 vendor/github.com/google/uuid/README.md create mode 100644 vendor/github.com/google/uuid/dce.go create mode 100644 vendor/github.com/google/uuid/doc.go create mode 100644 vendor/github.com/google/uuid/hash.go create mode 100644 vendor/github.com/google/uuid/marshal.go create mode 100644 vendor/github.com/google/uuid/node.go create mode 100644 vendor/github.com/google/uuid/node_js.go create mode 100644 vendor/github.com/google/uuid/node_net.go create mode 100644 vendor/github.com/google/uuid/null.go create mode 100644 vendor/github.com/google/uuid/sql.go create mode 100644 vendor/github.com/google/uuid/time.go create mode 100644 vendor/github.com/google/uuid/util.go create mode 100644 vendor/github.com/google/uuid/uuid.go create mode 100644 vendor/github.com/google/uuid/version1.go create mode 100644 vendor/github.com/google/uuid/version4.go create mode 100644 vendor/github.com/googleapis/enterprise-certificate-proxy/LICENSE create mode 100644 vendor/github.com/googleapis/enterprise-certificate-proxy/client/client.go create mode 100644 vendor/github.com/googleapis/enterprise-certificate-proxy/client/util/util.go create mode 100644 vendor/github.com/googleapis/gax-go/v2/.release-please-manifest.json create mode 100644 vendor/github.com/googleapis/gax-go/v2/CHANGES.md create mode 100644 vendor/github.com/googleapis/gax-go/v2/internal/version.go create mode 100644 vendor/github.com/googleapis/gax-go/v2/proto_json_stream.go create mode 100644 vendor/github.com/googleapis/gax-go/v2/release-please-config.json create mode 100644 vendor/golang.org/x/oauth2/google/error.go create mode 100644 vendor/golang.org/x/sys/execabs/execabs_go118.go create mode 100644 vendor/golang.org/x/sys/execabs/execabs_go119.go create mode 100644 vendor/golang.org/x/sys/unix/syscall_linux_loong64.go create mode 100644 vendor/golang.org/x/sys/unix/zerrors_linux_loong64.go create mode 100644 vendor/golang.org/x/sys/unix/zsyscall_linux_loong64.go create mode 100644 vendor/golang.org/x/sys/unix/zsysnum_linux_loong64.go create mode 100644 
vendor/golang.org/x/sys/unix/ztypes_linux_loong64.go create mode 100644 vendor/google.golang.org/api/internal/version.go delete mode 100644 vendor/google.golang.org/api/option/credentials_go19.go delete mode 100644 vendor/google.golang.org/api/option/credentials_notgo19.go create mode 100644 vendor/google.golang.org/api/transport/cert/enterprise_cert.go create mode 100644 vendor/google.golang.org/api/transport/cert/secureconnect_cert.go delete mode 100644 vendor/google.golang.org/api/transport/http/default_transport_go113.go delete mode 100644 vendor/google.golang.org/api/transport/http/default_transport_not_go113.go create mode 100644 vendor/google.golang.org/grpc/channelz/channelz.go create mode 100644 vendor/google.golang.org/grpc/credentials/insecure/insecure.go create mode 100644 vendor/google.golang.org/grpc/internal/balancer/gracefulswitch/gracefulswitch.go create mode 100644 vendor/google.golang.org/grpc/internal/channelz/id.go create mode 100644 vendor/google.golang.org/grpc/internal/envconfig/xds.go create mode 100644 vendor/google.golang.org/grpc/internal/grpcutil/grpcutil.go create mode 100644 vendor/google.golang.org/grpc/internal/grpcutil/regex.go delete mode 100644 vendor/google.golang.org/grpc/internal/grpcutil/target.go create mode 100644 vendor/google.golang.org/grpc/internal/pretty/pretty.go delete mode 100644 vendor/google.golang.org/grpc/internal/xds/env/env.go create mode 100644 vendor/google.golang.org/grpc/resolver/map.go diff --git a/docs/aws-route53/README.md b/docs/aws-route53/README.md index 165865224..3bdeac2ba 100644 --- a/docs/aws-route53/README.md +++ b/docs/aws-route53/README.md @@ -128,7 +128,7 @@ metadata: # If you are delegating the certificate management to Gardener, uncomment the following line (see https://gardener.cloud/documentation/guides/administer_shoots/x509_certificates/) #cert.gardener.cloud/purpose: managed # routing-policy annotation provides the `.spec.routingPolicy` section as JSON - # Note: Currently only supported for aws-route53 (see https://github.com/gardener/external-dns-management/tree/master/docs/aws-route53#weighted-routing-policy) + # Note: Currently only supported for aws-route53 or google-clouddns (see https://github.com/gardener/external-dns-management/tree/master/docs/aws-route53#weighted-routing-policy) dns.gardener.cloud/routing-policy: '{"type": "weighted", "setIdentifier": "my-id", "parameters": {"weight": "10"}}' name: test-ingress-weighted-routing-policy namespace: default diff --git a/docs/google-cloud-dns/README.md b/docs/google-cloud-dns/README.md index 2e5657b6b..e3488c334 100644 --- a/docs/google-cloud-dns/README.md +++ b/docs/google-cloud-dns/README.md @@ -50,4 +50,55 @@ data: # replace '...' with json key from service account creation (encoded as base64) # see https://cloud.google.com/iam/docs/creating-managing-service-accounts serviceaccount.json: ... +``` + +## Routing Policy + +The Google CloudDNS provider currently supports only the `weighted` routing policy. + +### Weighted Routing Policy + +Each weighted record set is defined by a separate `DNSEntry`. In this way it is possible to use different dns-controller-manager deployments +acting on the same domain names. Every record set needs a `SetIdentifier` which must be a digit "0", "1", "2", "3", or "4" (representing the index in the +resource record set policy). +Weighted routing policy is supported for all record types, i.e. `A`, `AAAA`, `CNAME`, and `TXT`. +All entries of the same domain name must have the same record type and TTL.
Only integral weights >= 0 are allowed. + +#### Annotating Ingress or Service Resources with Routing Policy + +To specify the routing policy, add an annotation `dns.gardener.cloud/routing-policy` +containing the routing policy section in JSON format to the `Ingress` or `Service` resource. +E.g. for an ingress resource: + +```yaml +apiVersion: networking.k8s.io/v1 +kind: Ingress +metadata: + annotations: + dns.gardener.cloud/dnsnames: '*' + # If you are delegating the DNS management to Gardener, uncomment the following line (see https://gardener.cloud/documentation/guides/administer_shoots/dns_names/) + #dns.gardener.cloud/class: garden + # If you are delegating the certificate management to Gardener, uncomment the following line (see https://gardener.cloud/documentation/guides/administer_shoots/x509_certificates/) + #cert.gardener.cloud/purpose: managed + # routing-policy annotation provides the `.spec.routingPolicy` section as JSON + # Note: Currently only supported for aws-route53 and google-clouddns + dns.gardener.cloud/routing-policy: '{"type": "weighted", "setIdentifier": "0", "parameters": {"weight": "10"}}' + name: test-ingress-weighted-routing-policy + namespace: default +spec: + rules: + - host: test.ingress.my-dns-domain.com + http: + paths: + - backend: + service: + name: my-service + port: + number: 9000 + path: / + pathType: Prefix + tls: + - hosts: + - test.ingress.my-dns-domain.com + #secretName: my-cert-secret-name ``` \ No newline at end of file diff --git a/examples/41-entry-weighted.yaml b/examples/41-entry-weighted.yaml index 5d64b18fe..e421e030a 100644 --- a/examples/41-entry-weighted.yaml +++ b/examples/41-entry-weighted.yaml @@ -11,7 +11,7 @@ spec: ttl: 120 targets: - instance-a.service.example.com - # routingPolicy is current only supported for AWS Route53 + # routingPolicy is current only supported for AWS Route53 or Google CloudDNS routingPolicy: type: weighted setIdentifier: instance-a @@ -31,7 +31,7 @@ spec: ttl: 120 targets: - instance-b.service.example.com - # routingPolicy is current only supported for AWS Route53 + # routingPolicy is current only supported for AWS Route53 or Google CloudDNS routingPolicy: type: weighted setIdentifier: instance-b diff --git a/examples/51-ingress-weighted.yaml b/examples/51-ingress-weighted.yaml index 25c263e9b..794dc99b0 100644 --- a/examples/51-ingress-weighted.yaml +++ b/examples/51-ingress-weighted.yaml @@ -8,7 +8,7 @@ metadata: # If you are delegating the certificate management to Gardener, uncomment the following line (see https://gardener.cloud/documentation/guides/administer_shoots/x509_certificates/) #cert.gardener.cloud/purpose: managed # routing-policy annotation provides the `.spec.routingPolicy` section as JSON - # Note: Currently only supported for aws-route53 (see https://github.com/gardener/external-dns-management/tree/master/docs/aws-route53#weighted-routing-policy) + # Note: Currently only supported for aws-route53 or google-clouddns (see https://github.com/gardener/external-dns-management/tree/master/docs/aws-route53#weighted-routing-policy) dns.gardener.cloud/routing-policy: '{"type": "weighted", "setIdentifier": "my-id", "parameters": {"weight": "10"}}' name: test-ingress-weighted-routing-policy namespace: default diff --git a/examples/51-service-weighted.yaml b/examples/51-service-weighted.yaml index 8cc025791..e0ecc643e 100644 --- a/examples/51-service-weighted.yaml +++ b/examples/51-service-weighted.yaml @@ -7,7 +7,7 @@ metadata: # If you are delegating the DNS Management to Gardener, uncomment the 
following line (see https://gardener.cloud/documentation/guides/administer_shoots/dns_names/) #dns.gardener.cloud/class: garden # routing-policy annotation provides the `.spec.routingPolicy` section as JSON - # Note: Currently only supported for aws-route53 (see https://github.com/gardener/external-dns-management/tree/master/docs/aws-route53#weighted-routing-policy) + # Note: Currently only supported for aws-route53 or google-clouddns (see https://github.com/gardener/external-dns-management/tree/master/docs/aws-route53#weighted-routing-policy) dns.gardener.cloud/routing-policy: '{"type": "weighted", "setIdentifier": "my-id", "parameters": {"weight": "10"}}' name: test-service-weighted namespace: default diff --git a/go.mod b/go.mod index 3983312ad..e2222625b 100644 --- a/go.mod +++ b/go.mod @@ -25,11 +25,11 @@ require ( go.uber.org/atomic v1.9.0 go.uber.org/automaxprocs v1.4.0 golang.org/x/lint v0.0.0-20210508222113-6edffad5e616 - golang.org/x/oauth2 v0.0.0-20211104180415-d3ed0bb246c8 - golang.org/x/sync v0.0.0-20210220032951-036812b2e83c - google.golang.org/api v0.63.0 - google.golang.org/grpc v1.41.0 - google.golang.org/protobuf v1.27.1 + golang.org/x/oauth2 v0.0.0-20220622183110-fd043fe589d2 + golang.org/x/sync v0.0.0-20220601150217-0de741cfad7f + google.golang.org/api v0.88.0 + google.golang.org/grpc v1.47.0 + google.golang.org/protobuf v1.28.0 k8s.io/api v0.24.1 k8s.io/apimachinery v0.24.1 k8s.io/client-go v0.24.1 @@ -41,7 +41,7 @@ require ( ) require ( - cloud.google.com/go v0.99.0 // indirect + cloud.google.com/go/compute v1.7.0 // indirect github.com/Azure/go-autorest v14.2.0+incompatible // indirect github.com/Azure/go-autorest/autorest/adal v0.9.14 // indirect github.com/Azure/go-autorest/autorest/azure/cli v0.4.2 // indirect @@ -81,10 +81,12 @@ require ( github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect github.com/golang/protobuf v1.5.2 // indirect github.com/google/gnostic v0.5.7-v3refs // indirect - github.com/google/go-cmp v0.5.6 // indirect + github.com/google/go-cmp v0.5.8 // indirect github.com/google/gofuzz v1.1.0 // indirect github.com/google/pprof v0.0.0-20210720184732-4bb14d4b1be1 // indirect - github.com/googleapis/gax-go/v2 v2.1.1 // indirect + github.com/google/uuid v1.3.0 // indirect + github.com/googleapis/enterprise-certificate-proxy v0.1.0 // indirect + github.com/googleapis/gax-go/v2 v2.4.0 // indirect github.com/imdario/mergo v0.3.12 // indirect github.com/inconshreveable/mousetrap v1.0.0 // indirect github.com/jmespath/go-jmespath v0.4.0 // indirect @@ -113,15 +115,15 @@ require ( go.opencensus.io v0.23.0 // indirect golang.org/x/crypto v0.0.0-20220427172511-eb4f295cb31f // indirect golang.org/x/mod v0.6.0-dev.0.20220106191415-9b9b3d81d5e3 // indirect - golang.org/x/net v0.0.0-20220225172249-27dd8689420f // indirect - golang.org/x/sys v0.0.0-20220412211240-33da011f77ad // indirect + golang.org/x/net v0.0.0-20220624214902-1bab6f366d9e // indirect + golang.org/x/sys v0.0.0-20220624220833-87e55d714810 // indirect golang.org/x/term v0.0.0-20210927222741-03fcf44c2211 // indirect golang.org/x/text v0.3.7 // indirect golang.org/x/time v0.0.0-20220210224613-90d013bbcef8 // indirect golang.org/x/tools v0.1.10 // indirect - golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1 // indirect + golang.org/x/xerrors v0.0.0-20220609144429-65e65417b02f // indirect google.golang.org/appengine v1.6.7 // indirect - google.golang.org/genproto v0.0.0-20220107163113-42d7afdf6368 // indirect + google.golang.org/genproto v0.0.0-20220624142145-8cd45d7dbd1f 
// indirect gopkg.in/inf.v0 v0.9.1 // indirect gopkg.in/ini.v1 v1.62.0 // indirect gopkg.in/yaml.v2 v2.4.0 // indirect diff --git a/go.sum b/go.sum index 09a7d2b26..ed73e834b 100644 --- a/go.sum +++ b/go.sum @@ -25,17 +25,26 @@ cloud.google.com/go v0.90.0/go.mod h1:kRX0mNRHe0e2rC6oNakvwQqzyDmg57xJ+SZU1eT2aD cloud.google.com/go v0.93.3/go.mod h1:8utlLll2EF5XMAV15woO4lSbWQlk8rer9aLOfLh7+YI= cloud.google.com/go v0.94.1/go.mod h1:qAlAugsXlC+JWO+Bke5vCtc9ONxjQT3drlTTnAplMW4= cloud.google.com/go v0.97.0/go.mod h1:GF7l59pYBVlXQIBLx3a761cZ41F9bBH3JUlihCt2Udc= -cloud.google.com/go v0.99.0 h1:y/cM2iqGgGi5D5DQZl6D9STN/3dR/Vx5Mp8s752oJTY= cloud.google.com/go v0.99.0/go.mod h1:w0Xx2nLzqWJPuozYQX+hFfCSI8WioryfRDzkoI/Y2ZA= +cloud.google.com/go v0.100.2/go.mod h1:4Xra9TjzAeYHrl5+oeLlzbM2k3mjVhZh4UqTZ//w99A= +cloud.google.com/go v0.102.0/go.mod h1:oWcCzKlqJ5zgHQt9YsaeTY9KzIvjyy0ArmiBUgpQ+nc= cloud.google.com/go/bigquery v1.0.1/go.mod h1:i/xbL2UlR5RvWAURpBYZTtm/cXjCha9lbfbpx4poX+o= cloud.google.com/go/bigquery v1.3.0/go.mod h1:PjpwJnslEMmckchkHFfq+HTD2DmtT67aNFKH1/VBDHE= cloud.google.com/go/bigquery v1.4.0/go.mod h1:S8dzgnTigyfTmLBfrtrhyYhwRxG72rYxvftPBK2Dvzc= cloud.google.com/go/bigquery v1.5.0/go.mod h1:snEHRnqQbz117VIFhE8bmtwIDY80NLUZUMb4Nv6dBIg= cloud.google.com/go/bigquery v1.7.0/go.mod h1://okPTzCYNXSlb24MZs83e2Do+h+VXtc4gLoIoXIAPc= cloud.google.com/go/bigquery v1.8.0/go.mod h1:J5hqkt3O0uAFnINi6JXValWIb1v0goeZM77hZzJN/fQ= +cloud.google.com/go/compute v0.1.0/go.mod h1:GAesmwr110a34z04OlxYkATPBEfVhkymfTBXtfbBFow= +cloud.google.com/go/compute v1.3.0/go.mod h1:cCZiE1NHEtai4wiufUhW8I8S1JKkAnhnQJWM7YD99wM= +cloud.google.com/go/compute v1.5.0/go.mod h1:9SMHyhJlzhlkJqrPAc839t2BZFTSk6Jdj6mkzQJeu0M= +cloud.google.com/go/compute v1.6.0/go.mod h1:T29tfhtVbq1wvAPo0E3+7vhgmkOYeXjhFvz/FMzPu0s= +cloud.google.com/go/compute v1.6.1/go.mod h1:g85FgpzFvNULZ+S8AYq87axRKuf2Kh7deLqV/jJ3thU= +cloud.google.com/go/compute v1.7.0 h1:v/k9Eueb8aAJ0vZuxKMrgm6kPhCLZU9HxFU+AFDs9Uk= +cloud.google.com/go/compute v1.7.0/go.mod h1:435lt8av5oL9P3fv1OEzSbSUe+ybHXGMPQHHZWZxy9U= cloud.google.com/go/datastore v1.0.0/go.mod h1:LXYbyblFSglQ5pkeyhO+Qmw7ukd3C+pD7TKLgZqpHYE= cloud.google.com/go/datastore v1.1.0/go.mod h1:umbIZjpQpHh4hmRpGhH4tLFup+FVzqBi1b3c64qFpCk= cloud.google.com/go/firestore v1.1.0/go.mod h1:ulACoGHTpvq5r8rxGJ4ddJZBZqakUQqClKRT5SZwBmk= +cloud.google.com/go/iam v0.3.0/go.mod h1:XzJPvDayI+9zsASAFO68Hk07u3z+f+JrT2xXNdp4bnY= cloud.google.com/go/pubsub v1.0.1/go.mod h1:R0Gpsv3s54REJCy4fxDixWD93lHJMoZTyQ2kNxGRt3I= cloud.google.com/go/pubsub v1.1.0/go.mod h1:EwwdRX2sKPjnvnqCa270oGRyludottCI76h+R3AArQw= cloud.google.com/go/pubsub v1.2.0/go.mod h1:jhfEVHT8odbXTkndysNHCcx0awwzvfOlguIAii9o8iA= @@ -45,6 +54,7 @@ cloud.google.com/go/storage v1.5.0/go.mod h1:tpKbwo567HUNpVclU5sGELwQWBDZ8gh0Zeo cloud.google.com/go/storage v1.6.0/go.mod h1:N7U0C8pVQ/+NIKOBQyamJIeKQKkZ+mxpohlUTyfDhBk= cloud.google.com/go/storage v1.8.0/go.mod h1:Wv1Oy7z6Yz3DshWRJFhqM/UCfaWIRTdp0RXyy7KQOVs= cloud.google.com/go/storage v1.10.0/go.mod h1:FLPqc6j+Ki4BU591ie1oL6qBQGu2Bl/tZ9ullr3+Kg0= +cloud.google.com/go/storage v1.22.1/go.mod h1:S8N1cAStu7BOeFfE8KAQzmyyLkK8p/vmRq6kuBTW58Y= dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU= github.com/Azure/azure-sdk-for-go v59.3.0+incompatible h1:dPIm0BO4jsMXFcCI/sLTPkBtE7mk8WMuRHA0JeWhlcQ= github.com/Azure/azure-sdk-for-go v59.3.0+incompatible/go.mod h1:9XXNKU+eRnpl9moKnB4QOLf1HestfXbmab5FXxiDBjc= @@ -152,8 +162,12 @@ github.com/cloudflare/cloudflare-go 
v0.11.4/go.mod h1:ZB+hp7VycxPLpp0aiozQQezat4 github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc= github.com/cncf/udpa/go v0.0.0-20200629203442-efcf912fb354/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk= github.com/cncf/udpa/go v0.0.0-20201120205902-5459f2c99403/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk= +github.com/cncf/udpa/go v0.0.0-20210930031921-04548b0d99d4/go.mod h1:6pvJx4me5XPnfI9Z40ddWsdw2W/uZgQLFXToKeRcDiI= github.com/cncf/xds/go v0.0.0-20210312221358-fbca930ec8ed/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= github.com/cncf/xds/go v0.0.0-20210805033703-aa0b78936158/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= +github.com/cncf/xds/go v0.0.0-20210922020428-25de7278fc84/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= +github.com/cncf/xds/go v0.0.0-20211001041855-01bcc9b48dfe/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= +github.com/cncf/xds/go v0.0.0-20211011173535-cb28da3451f1/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= github.com/cockroachdb/datadriven v0.0.0-20200714090401-bf6692d28da5/go.mod h1:h6jFvWxBdQXxjopDMZyH2UVceIRfR84bdzbkoKrsWNo= github.com/cockroachdb/errors v1.2.4/go.mod h1:rQD95gz6FARkaKkQXUksEje/d9a6wBJoCr5oaCLELYA= github.com/cockroachdb/logtags v0.0.0-20190617123548-eb05cc24525f/go.mod h1:i/u985jwjWRlyHXQbwatDASoW0RMlZ/3i9yJHE2xLkI= @@ -202,6 +216,7 @@ github.com/envoyproxy/go-control-plane v0.9.9-0.20201210154907-fd9021fe5dad/go.m github.com/envoyproxy/go-control-plane v0.9.9-0.20210217033140-668b12f5399d/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk= github.com/envoyproxy/go-control-plane v0.9.9-0.20210512163311-63b5d3c536b0/go.mod h1:hliV/p42l8fGbc6Y9bQ70uLwIvmJyVE5k4iMKlh8wCQ= github.com/envoyproxy/go-control-plane v0.9.10-0.20210907150352-cf90f659a021/go.mod h1:AFq3mo9L8Lqqiid3OhADV3RfLJnjiw63cSpi+fDTRC0= +github.com/envoyproxy/go-control-plane v0.10.2-0.20220325020618-49ff273808a1/go.mod h1:KJwIaB5Mv44NWtYuAOFCVOjcI94vtpEz2JU/D2v6IjE= github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= github.com/evanphx/json-patch v4.9.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk= github.com/evanphx/json-patch v4.12.0+incompatible h1:4onqiflcdA9EOZ4RxV643DvftH5pOlLGNtQ5lPWQu84= @@ -422,8 +437,10 @@ github.com/google/go-cmp v0.5.2/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/ github.com/google/go-cmp v0.5.3/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.4/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.5.6 h1:BKbKCqvP6I+rmFHt06ZmyQtvB8xAkWdhFyr0ZUNZcxQ= github.com/google/go-cmp v0.5.6/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.7/go.mod h1:n+brtR0CgQNWTVd5ZUFpTBC8YFBDLK/h/bpaJ8/DtOE= +github.com/google/go-cmp v0.5.8 h1:e6P7q2lk1O+qJJb4BtCQXlK8vWEO8V1ZeuEdJNOqZyg= +github.com/google/go-cmp v0.5.8/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= github.com/google/gofuzz v1.1.0 h1:Hsa8mG0dQ46ij8Sl2AYJDUv1oA9/d6Vk+3LG99Oe02g= github.com/google/gofuzz v1.1.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= @@ -449,14 +466,22 @@ github.com/google/pprof v0.0.0-20210720184732-4bb14d4b1be1/go.mod h1:kpwsk12EmLe github.com/google/renameio v0.1.0/go.mod 
h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI= github.com/google/uuid v1.0.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/google/uuid v1.1.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= -github.com/google/uuid v1.1.2 h1:EVhdT+1Kseyi1/pUmXKaFxYsDNy9RQYkMWRH68J/W7Y= github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/google/uuid v1.3.0 h1:t6JiXgmwXMjEs8VusXIJk2BXHsn+wx8BZdTaoZ5fu7I= +github.com/google/uuid v1.3.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/googleapis/enterprise-certificate-proxy v0.0.0-20220520183353-fd19c99a87aa/go.mod h1:17drOmN3MwGY7t0e+Ei9b45FFGA3fBs3x36SsCg1hq8= +github.com/googleapis/enterprise-certificate-proxy v0.1.0 h1:zO8WHNx/MYiAKJ3d5spxZXZE6KHmIQGQcAzwUzV7qQw= +github.com/googleapis/enterprise-certificate-proxy v0.1.0/go.mod h1:17drOmN3MwGY7t0e+Ei9b45FFGA3fBs3x36SsCg1hq8= github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg= github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk= github.com/googleapis/gax-go/v2 v2.1.0/go.mod h1:Q3nei7sK6ybPYH7twZdmQpAd1MKb7pfu6SK+H1/DsU0= -github.com/googleapis/gax-go/v2 v2.1.1 h1:dp3bWCh+PPO1zjRRiCSczJav13sBvG4UhNyVTa1KqdU= github.com/googleapis/gax-go/v2 v2.1.1/go.mod h1:hddJymUZASv3XPyGkUpKj8pPO47Rmb0eJc8R6ouapiM= +github.com/googleapis/gax-go/v2 v2.2.0/go.mod h1:as02EH8zWkzwUoLbBaFeQ+arQaj/OthfcblKl4IGNaM= +github.com/googleapis/gax-go/v2 v2.3.0/go.mod h1:b8LNqSzNabLiUpXKkY7HAR5jr6bIT99EXz9pXxye9YM= +github.com/googleapis/gax-go/v2 v2.4.0 h1:dS9eYAjhrE2RjmzYw2XAPvcXfmcQLtFEQWn0CR82awk= +github.com/googleapis/gax-go/v2 v2.4.0/go.mod h1:XOTVJ59hdnfJLIP/dh8n5CGryZR2LxK9wbMD5+iXC6c= github.com/googleapis/gnostic v0.4.1/go.mod h1:LRhVm6pbyptWbWbuZ38d1eyptfvIytN3ir6b65WBswg= +github.com/googleapis/go-type-adapters v1.0.0/go.mod h1:zHW75FOG2aur7gAO2B+MLby+cLsWGBF62rFAi7WjWO4= github.com/gophercloud/gophercloud v0.20.0/go.mod h1:wRtmUelyIIv3CSSDI47aUwbs075O6i+LY+pXsKCBsb4= github.com/gophercloud/gophercloud v0.24.0 h1:jDsIMGJ1KZpAjYfQgGI2coNQj5Q83oPzuiGJRFWgMzw= github.com/gophercloud/gophercloud v0.24.0/go.mod h1:Q8fZtyi5zZxPS/j9aj3sSxtvj41AdQMDwyo1myduD5c= @@ -945,8 +970,13 @@ golang.org/x/net v0.0.0-20210825183410-e898025ed96a/go.mod h1:9nx3DQGgdP8bBQD5qx golang.org/x/net v0.0.0-20211015210444-4f30a5c0130f/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.0.0-20211112202133-69e39bad7dc2/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.0.0-20220127200216-cd36cc0744dd/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= -golang.org/x/net v0.0.0-20220225172249-27dd8689420f h1:oA4XRj0qtSt8Yo1Zms0CUlsT3KG69V2UGQWPBxujDmc= golang.org/x/net v0.0.0-20220225172249-27dd8689420f/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= +golang.org/x/net v0.0.0-20220325170049-de3da57026de/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= +golang.org/x/net v0.0.0-20220412020605-290c469a71a5/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= +golang.org/x/net v0.0.0-20220425223048-2871e0cb64e4/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= +golang.org/x/net v0.0.0-20220607020251-c690dde0001d/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= +golang.org/x/net v0.0.0-20220624214902-1bab6f366d9e h1:TsQ7F31D3bUCLeqPT0u+yjp1guoArKaNKmCr22PYgTQ= +golang.org/x/net v0.0.0-20220624214902-1bab6f366d9e/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= golang.org/x/oauth2 
v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= @@ -962,8 +992,13 @@ golang.org/x/oauth2 v0.0.0-20210514164344-f6687ab2804c/go.mod h1:KelEdhl1UZF7XfJ golang.org/x/oauth2 v0.0.0-20210628180205-a41e5a781914/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= golang.org/x/oauth2 v0.0.0-20210805134026-6f1e6394065a/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= golang.org/x/oauth2 v0.0.0-20210819190943-2bc19b11175f/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= -golang.org/x/oauth2 v0.0.0-20211104180415-d3ed0bb246c8 h1:RerP+noqYHUQ8CMRcPlC2nvTa4dcBIjegkuWdcUDuqg= golang.org/x/oauth2 v0.0.0-20211104180415-d3ed0bb246c8/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20220223155221-ee480838109b/go.mod h1:DAh4E804XQdzx2j+YRIaUnCqCV2RuMz24cGBJ5QYIrc= +golang.org/x/oauth2 v0.0.0-20220309155454-6242fa91716a/go.mod h1:DAh4E804XQdzx2j+YRIaUnCqCV2RuMz24cGBJ5QYIrc= +golang.org/x/oauth2 v0.0.0-20220411215720-9780585627b5/go.mod h1:DAh4E804XQdzx2j+YRIaUnCqCV2RuMz24cGBJ5QYIrc= +golang.org/x/oauth2 v0.0.0-20220608161450-d0670ef3b1eb/go.mod h1:jaDAt6Dkxork7LmZnYtzbRWj0W47D86a3TGe0YHBvmE= +golang.org/x/oauth2 v0.0.0-20220622183110-fd043fe589d2 h1:+jnHzr9VPj32ykQVai5DNahi9+NSp7yYuCsl5eAQtL0= +golang.org/x/oauth2 v0.0.0-20220622183110-fd043fe589d2/go.mod h1:jaDAt6Dkxork7LmZnYtzbRWj0W47D86a3TGe0YHBvmE= golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= @@ -975,8 +1010,9 @@ golang.org/x/sync v0.0.0-20200317015054-43a5402ce75a/go.mod h1:RxMgew5VJxzue5/jJ golang.org/x/sync v0.0.0-20200625203802-6e8e738ad208/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20201207232520-09787c993a3a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20210220032951-036812b2e83c h1:5KslGYwFpkhGh+Q16bwMP3cOontH8FOep7tGV86Y7SQ= golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20220601150217-0de741cfad7f h1:Ax0t5p6N38Ga0dThY21weqDEyz2oklo4IvDkpigvkD8= +golang.org/x/sync v0.0.0-20220601150217-0de741cfad7f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sys v0.0.0-20170927054621-314a259e304f/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180823144017-11551d06cbcc/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= @@ -1058,9 +1094,17 @@ golang.org/x/sys v0.0.0-20211124211545-fe61309f8881/go.mod h1:oPkhp1MJrh7nUepCBc golang.org/x/sys v0.0.0-20211210111614-af8b64212486/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20211216021012-1d35b9e2eb4e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220114195835-da31bd327af9/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys 
v0.0.0-20220128215802-99c3d69c2c27/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220209214540-3681064d5158/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220412211240-33da011f77ad h1:ntjMns5wyP/fN65tdBD4g8J5w8n015+iIIs9rtjXkY0= +golang.org/x/sys v0.0.0-20220227234510-4e6760a101f9/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220328115105-d36c6a25d886/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220412211240-33da011f77ad/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220502124256-b6088ccd6cba/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220503163025-988cb79eb6c6/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220610221304-9f5ed59c137d/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220624220833-87e55d714810 h1:rHZQSjJdAI4Xf5Qzeh2bBc5YJIkPFVM6oDtMFYmgws0= +golang.org/x/sys v0.0.0-20220624220833-87e55d714810/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/term v0.0.0-20201117132131-f5c789dd3221/go.mod h1:Nr5EML6q2oocZ2LXRh80K7BxOlk5/8JxuGnuhpl+muw= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/term v0.0.0-20210927222741-03fcf44c2211 h1:JGgROgKl9N8DuW20oFS5gxc+lE67/N3FcwmBPMe7ArY= @@ -1158,8 +1202,11 @@ golang.org/x/tools v0.1.10/go.mod h1:Uh6Zz+xoGYZom868N8YTex3t7RhtHDBrE8Gzo9bV56E golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= -golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1 h1:go1bK/D/BFZV2I8cIQd1NKEZ+0owSTG1fDTci4IqFcE= golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20220411194840-2f41105eb62f/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20220517211312-f3a8303e98df/go.mod h1:K8+ghG5WaK9qNqU5K3HdILfMLy1f3aNYFI/wnl100a8= +golang.org/x/xerrors v0.0.0-20220609144429-65e65417b02f h1:uF6paiQQebLeSXkrTqHqz0MXhXXS1KgF41eUdBNvxK0= +golang.org/x/xerrors v0.0.0-20220609144429-65e65417b02f/go.mod h1:K8+ghG5WaK9qNqU5K3HdILfMLy1f3aNYFI/wnl100a8= gomodules.xyz/jsonpatch/v2 v2.2.0 h1:4pT439QV83L+G9FkcCriY6EkpcK6r6bK+A5FBUMI7qY= google.golang.org/api v0.4.0/go.mod h1:8k5glujaEP+g9n7WNsDg8QP6cUVNI86fCNMcbazEtwE= google.golang.org/api v0.7.0/go.mod h1:WtwebWUNSVBH/HAw79HIFXZNqEvBhG+Ra+ax0hx3E3M= @@ -1191,8 +1238,17 @@ google.golang.org/api v0.55.0/go.mod h1:38yMfeP1kfjsl8isn0tliTjIb1rJXcQi4UXlbqiv google.golang.org/api v0.56.0/go.mod h1:38yMfeP1kfjsl8isn0tliTjIb1rJXcQi4UXlbqivdVE= google.golang.org/api v0.57.0/go.mod h1:dVPlbZyBo2/OjBpmvNdpn2GRm6rPy75jyU7bmhdrMgI= google.golang.org/api v0.61.0/go.mod h1:xQRti5UdCmoCEqFxcz93fTl338AVqDgyaDRuOZ3hg9I= -google.golang.org/api v0.63.0 h1:n2bqqK895ygnBpdPDYetfy23K7fJ22wsrZKCyfuRkkA= google.golang.org/api v0.63.0/go.mod h1:gs4ij2ffTRXwuzzgJl/56BdwJaA194ijkfn++9tDuPo= +google.golang.org/api v0.67.0/go.mod h1:ShHKP8E60yPsKNw/w8w+VYaj9H6buA5UqDp8dhbQZ6g= +google.golang.org/api 
v0.70.0/go.mod h1:Bs4ZM2HGifEvXwd50TtW70ovgJffJYw2oRCOFU/SkfA= +google.golang.org/api v0.71.0/go.mod h1:4PyU6e6JogV1f9eA4voyrTY2batOLdgZ5qZ5HOCc4j8= +google.golang.org/api v0.74.0/go.mod h1:ZpfMZOVRMywNyvJFeqL9HRWBgAuRfSjJFpe9QtRRyDs= +google.golang.org/api v0.75.0/go.mod h1:pU9QmyHLnzlpar1Mjt4IbapUCy8J+6HD6GeELN69ljA= +google.golang.org/api v0.78.0/go.mod h1:1Sg78yoMLOhlQTeF+ARBoytAcH1NNyyl390YMy6rKmw= +google.golang.org/api v0.80.0/go.mod h1:xY3nI94gbvBrE0J6NHXhxOmW97HG7Khjkku6AFB3Hyg= +google.golang.org/api v0.84.0/go.mod h1:NTsGnUFJMYROtiquksZHBWtHfeMC7iYthki7Eq3pa8o= +google.golang.org/api v0.88.0 h1:MPwxQRqpyskYhr2iNyfsQ8R06eeyhe7UEuR30p136ZQ= +google.golang.org/api v0.88.0/go.mod h1:+Sem1dnrKlrXMR/X0bPnMWyluQe4RsNoYfmNLhOIkzw= google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= @@ -1243,6 +1299,7 @@ google.golang.org/genproto v0.0.0-20210222152913-aa3ee6e6a81c/go.mod h1:FWY/as6D google.golang.org/genproto v0.0.0-20210303154014-9728d6b83eeb/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/genproto v0.0.0-20210310155132-4ce2db91004e/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/genproto v0.0.0-20210319143718-93e7006c17a6/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20210329143202-679c6ae281ee/go.mod h1:9lPAdzaEmUacj36I+k7YKbEc5CXzPIeORRgDAUOu28A= google.golang.org/genproto v0.0.0-20210402141018-6c239bbf2bb1/go.mod h1:9lPAdzaEmUacj36I+k7YKbEc5CXzPIeORRgDAUOu28A= google.golang.org/genproto v0.0.0-20210513213006-bf773b8c8384/go.mod h1:P3QM42oQyzQSnHPnZ/vqoCdDmzH28fzWByN9asMeM8A= google.golang.org/genproto v0.0.0-20210602131652-f16073e35f0c/go.mod h1:UODoCrxHCcBojKKwX1terBiRUaqAsFqJiF615XL43r0= @@ -1263,8 +1320,27 @@ google.golang.org/genproto v0.0.0-20210924002016-3dee208752a0/go.mod h1:5CzLGKJ6 google.golang.org/genproto v0.0.0-20211118181313-81c1377c94b1/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= google.golang.org/genproto v0.0.0-20211206160659-862468c7d6e0/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= google.golang.org/genproto v0.0.0-20211208223120-3a66f561d7aa/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= -google.golang.org/genproto v0.0.0-20220107163113-42d7afdf6368 h1:Et6SkiuvnBn+SgrSYXs/BrUpGB4mbdwt4R3vaPIlicA= +google.golang.org/genproto v0.0.0-20211221195035-429b39de9b1c/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= google.golang.org/genproto v0.0.0-20220107163113-42d7afdf6368/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= +google.golang.org/genproto v0.0.0-20220126215142-9970aeb2e350/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= +google.golang.org/genproto v0.0.0-20220207164111-0872dc986b00/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= +google.golang.org/genproto v0.0.0-20220218161850-94dd64e39d7c/go.mod h1:kGP+zUP2Ddo0ayMi4YuN7C3WZyJvGLZRh8Z5wnAqvEI= +google.golang.org/genproto v0.0.0-20220222213610-43724f9ea8cf/go.mod h1:kGP+zUP2Ddo0ayMi4YuN7C3WZyJvGLZRh8Z5wnAqvEI= +google.golang.org/genproto v0.0.0-20220304144024-325a89244dc8/go.mod h1:kGP+zUP2Ddo0ayMi4YuN7C3WZyJvGLZRh8Z5wnAqvEI= +google.golang.org/genproto v0.0.0-20220310185008-1973136f34c6/go.mod h1:kGP+zUP2Ddo0ayMi4YuN7C3WZyJvGLZRh8Z5wnAqvEI= +google.golang.org/genproto v0.0.0-20220324131243-acbaeb5b85eb/go.mod 
h1:hAL49I2IFola2sVEjAn7MEwsja0xp51I0tlGAf9hz4E= +google.golang.org/genproto v0.0.0-20220407144326-9054f6ed7bac/go.mod h1:8w6bsBMX6yCPbAVTeqQHvzxW0EIFigd5lZyahWgyfDo= +google.golang.org/genproto v0.0.0-20220413183235-5e96e2839df9/go.mod h1:8w6bsBMX6yCPbAVTeqQHvzxW0EIFigd5lZyahWgyfDo= +google.golang.org/genproto v0.0.0-20220414192740-2d67ff6cf2b4/go.mod h1:8w6bsBMX6yCPbAVTeqQHvzxW0EIFigd5lZyahWgyfDo= +google.golang.org/genproto v0.0.0-20220421151946-72621c1f0bd3/go.mod h1:8w6bsBMX6yCPbAVTeqQHvzxW0EIFigd5lZyahWgyfDo= +google.golang.org/genproto v0.0.0-20220429170224-98d788798c3e/go.mod h1:8w6bsBMX6yCPbAVTeqQHvzxW0EIFigd5lZyahWgyfDo= +google.golang.org/genproto v0.0.0-20220505152158-f39f71e6c8f3/go.mod h1:RAyBrSAP7Fh3Nc84ghnVLDPuV51xc9agzmm4Ph6i0Q4= +google.golang.org/genproto v0.0.0-20220518221133-4f43b3371335/go.mod h1:RAyBrSAP7Fh3Nc84ghnVLDPuV51xc9agzmm4Ph6i0Q4= +google.golang.org/genproto v0.0.0-20220523171625-347a074981d8/go.mod h1:RAyBrSAP7Fh3Nc84ghnVLDPuV51xc9agzmm4Ph6i0Q4= +google.golang.org/genproto v0.0.0-20220608133413-ed9918b62aac/go.mod h1:KEWEmljWE5zPzLBa/oHl6DaEt9LmfH6WtH1OHIvleBA= +google.golang.org/genproto v0.0.0-20220616135557-88e70c0c3a90/go.mod h1:KEWEmljWE5zPzLBa/oHl6DaEt9LmfH6WtH1OHIvleBA= +google.golang.org/genproto v0.0.0-20220624142145-8cd45d7dbd1f h1:hJ/Y5SqPXbarffmAsApliUlcvMU+wScNGfyop4bZm8o= +google.golang.org/genproto v0.0.0-20220624142145-8cd45d7dbd1f/go.mod h1:KEWEmljWE5zPzLBa/oHl6DaEt9LmfH6WtH1OHIvleBA= google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38= google.golang.org/grpc v1.21.0/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM= @@ -1292,8 +1368,12 @@ google.golang.org/grpc v1.39.0/go.mod h1:PImNr+rS9TWYb2O4/emRugxiyHZ5JyHW5F+RPnD google.golang.org/grpc v1.39.1/go.mod h1:PImNr+rS9TWYb2O4/emRugxiyHZ5JyHW5F+RPnDzfrE= google.golang.org/grpc v1.40.0/go.mod h1:ogyxbiOoUXAkP+4+xa6PZSE9DZgIHtSpzjDTB9KAK34= google.golang.org/grpc v1.40.1/go.mod h1:ogyxbiOoUXAkP+4+xa6PZSE9DZgIHtSpzjDTB9KAK34= -google.golang.org/grpc v1.41.0 h1:f+PlOh7QV4iIJkPrx5NQ7qaNGFQ3OTse67yaDHfju4E= -google.golang.org/grpc v1.41.0/go.mod h1:U3l9uK9J0sini8mHphKoXyaqDA/8VyGnDee1zzIUK6k= +google.golang.org/grpc v1.44.0/go.mod h1:k+4IHHFw41K8+bbowsex27ge2rCb65oeWqe4jJ590SU= +google.golang.org/grpc v1.45.0/go.mod h1:lN7owxKUQEqMfSyQikvvk5tf/6zMPsrK+ONuO11+0rQ= +google.golang.org/grpc v1.46.0/go.mod h1:vN9eftEi1UMyUsIF80+uQXhHjbXYbm0uXoFCACuMGWk= +google.golang.org/grpc v1.46.2/go.mod h1:vN9eftEi1UMyUsIF80+uQXhHjbXYbm0uXoFCACuMGWk= +google.golang.org/grpc v1.47.0 h1:9n77onPX5F3qfFCqjy9dhn8PbNQsIKeVU04J9G7umt8= +google.golang.org/grpc v1.47.0/go.mod h1:vN9eftEi1UMyUsIF80+uQXhHjbXYbm0uXoFCACuMGWk= google.golang.org/grpc/cmd/protoc-gen-go-grpc v1.1.0/go.mod h1:6Kw0yEErY5E/yWrBtf03jp27GLLJujG4z/JK95pnjjw= google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= @@ -1307,8 +1387,9 @@ google.golang.org/protobuf v1.24.0/go.mod h1:r/3tXBNzIEhYS9I1OUVjXDlt8tc493IdKGj google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c= google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw= google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= -google.golang.org/protobuf v1.27.1 
h1:SnqbnDw1V7RiZcXPx5MEeqPv2s79L9i7BJUlG/+RurQ= google.golang.org/protobuf v1.27.1/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= +google.golang.org/protobuf v1.28.0 h1:w43yiav+6bVFTBQFZX0r7ipe9JQ1QsbMgHwbBziscLw= +google.golang.org/protobuf v1.28.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= diff --git a/pkg/controller/provider/google/execution.go b/pkg/controller/provider/google/execution.go index 2e37526da..7803c2199 100644 --- a/pkg/controller/provider/google/execution.go +++ b/pkg/controller/provider/google/execution.go @@ -17,11 +17,10 @@ package google import ( - "fmt" - "github.com/gardener/controller-manager-library/pkg/logger" "github.com/gardener/controller-manager-library/pkg/utils" googledns "google.golang.org/api/dns/v1" + "google.golang.org/api/googleapi" "github.com/gardener/external-dns-management/pkg/dns" "github.com/gardener/external-dns-management/pkg/dns/provider" @@ -38,6 +37,8 @@ type Execution struct { change *googledns.Change done []provider.DoneHandler + + routingPolicyChanges routingPolicyChanges } func NewExecution(logger logger.LogContext, h *Handler, zone provider.DNSHostedZone) *Execution { @@ -46,68 +47,115 @@ func NewExecution(logger logger.LogContext, h *Handler, zone provider.DNSHostedZ Deletions: []*googledns.ResourceRecordSet{}, } return &Execution{ - LogContext: logger, - handler: h, - zone: zone, - change: change, - done: []provider.DoneHandler{}, + LogContext: logger, + handler: h, + zone: zone, + change: change, + done: []provider.DoneHandler{}, + routingPolicyChanges: routingPolicyChanges{}, } } func (this *Execution) addChange(req *provider.ChangeRequest) { var setName dns.DNSSetName var newset, oldset *dns.RecordSet + var policy *googleRoutingPolicyData + var err error if req.Addition != nil { setName, newset = dns.MapToProvider(req.Type, req.Addition, this.zone.Domain()) - if req.Addition.RoutingPolicy != nil { - err := fmt.Errorf("Routing policies unsupported for " + TYPE_CODE) - if req.Done != nil { - req.Done.SetInvalid(err) - } - return - } + policy, err = extractRoutingPolicy(req.Addition) } if req.Deletion != nil { setName, oldset = dns.MapToProvider(req.Type, req.Deletion, this.zone.Domain()) + if req.Addition == nil { + policy, err = extractRoutingPolicy(req.Deletion) + } } - name := setName.DNSName - if name == "" || (newset.Length() == 0 && oldset.Length() == 0) { + if err != nil { + if req.Done != nil { + req.Done.SetInvalid(err) + } return } - name = dns.AlignHostname(name) + + if setName.DNSName == "" || (newset.Length() == 0 && oldset.Length() == 0) { + return + } + setName = setName.Align() switch req.Action { case provider.R_CREATE: - this.Infof("%s %s record set %s[%s]: %s(%d)", req.Action, req.Type, name, this.zone.Id(), newset.RecordString(), newset.TTL) - this.change.Additions = append(this.change.Additions, mapRecordSet(name, newset)) + this.Infof("%s %s record set %s[%s]: %s(%d)", req.Action, req.Type, setName, this.zone.Id(), newset.RecordString(), newset.TTL) + this.addAddition(mapRecordSet(setName, newset, policy), req.Done) case provider.R_DELETE: - this.Infof("%s %s record set %s[%s]: %s", req.Action, req.Type, name, this.zone.Id(), oldset.RecordString()) - this.change.Deletions = 
append(this.change.Deletions, mapRecordSet(name, oldset)) + this.Infof("%s %s record set %s[%s]: %s", req.Action, req.Type, setName, this.zone.Id(), oldset.RecordString()) + this.addDeletion(mapRecordSet(setName, oldset, policy), req.Done) case provider.R_UPDATE: - this.Infof("%s %s record set %s[%s]: %s(%d)", req.Action, req.Type, name, this.zone.Id(), newset.RecordString(), newset.TTL) - this.change.Deletions = append(this.change.Deletions, mapRecordSet(name, oldset)) - this.change.Additions = append(this.change.Additions, mapRecordSet(name, newset)) + this.Infof("%s %s record set %s[%s]: %s(%d)", req.Action, req.Type, setName, this.zone.Id(), newset.RecordString(), newset.TTL) + this.addDeletion(mapRecordSet(setName, oldset, policy), req.Done) + this.addAddition(mapRecordSet(setName, newset, policy), nil) } - this.done = append(this.done, req.Done) } -func (this *Execution) submitChanges(metrics provider.Metrics) error { - if len(this.change.Additions) == 0 && len(this.change.Deletions) == 0 { - return nil +func (this *Execution) addAddition(set *googledns.ResourceRecordSet, done provider.DoneHandler) { + if done != nil { + this.done = append(this.done, done) + } + if set.RoutingPolicy == nil { + this.change.Additions = append(this.change.Additions, set) + return + } + + this.routingPolicyChanges.addChange(set, true) +} + +func (this *Execution) addDeletion(set *googledns.ResourceRecordSet, done provider.DoneHandler) { + if done != nil { + this.done = append(this.done, done) + } + if set.RoutingPolicy == nil { + this.change.Deletions = append(this.change.Deletions, set) + return + } + + this.routingPolicyChanges.addChange(set, false) +} + +func (this *Execution) prepareSubmission(rrsetGetter rrsetGetterFunc) error { + routingPolicyDeletions, routingPolicyAdditions, err := this.routingPolicyChanges.calcDeletionsAndAdditions(rrsetGetter) + if err != nil { + return err } - this.Infof("processing changes for zone %s", this.zone.Id()) for _, c := range this.change.Deletions { this.Infof("desired change: Deletion %s %s: %s", c.Name, c.Type, utils.Strings(c.Rrdatas...)) } + for _, c := range routingPolicyDeletions { + this.Infof("desired change: Deletion %s %s (routing policy: %s)", c.Name, c.Type, describeRoutingPolicy(c)) + this.change.Deletions = append(this.change.Deletions, c) + } for _, c := range this.change.Additions { this.Infof("desired change: Addition %s %s: %s", c.Name, c.Type, utils.Strings(c.Rrdatas...)) } + for _, c := range routingPolicyAdditions { + this.Infof("desired change: Addition %s %s (routing policy: %s)", c.Name, c.Type, describeRoutingPolicy(c)) + this.change.Additions = append(this.change.Additions, c) + } + return nil +} - metrics.AddZoneRequests(this.zone.Id().ID, provider.M_UPDATERECORDS, 1) - this.handler.config.RateLimiter.Accept() +func (this *Execution) submitChanges(metrics provider.Metrics) error { + if len(this.change.Additions) == 0 && len(this.change.Deletions) == 0 && len(this.routingPolicyChanges) == 0 { + return nil + } + + this.Infof("processing changes for zone %s", this.zone.Id()) projectID, zoneName := SplitZoneID(this.zone.Id().ID) - if _, err := this.handler.service.Changes.Create(projectID, zoneName, this.change).Do(); err != nil { + rrsetGetter := func(name, typ string) (*googledns.ResourceRecordSet, error) { + return this.handler.getResourceRecordSet(projectID, zoneName, name, typ) + } + err := this.prepareSubmission(rrsetGetter) + if err != nil { this.Error(err) for _, d := range this.done { if d != nil { @@ -115,18 +163,37 @@ func (this 
*Execution) submitChanges(metrics provider.Metrics) error { } } return err - } else { + } + + metrics.AddZoneRequests(this.zone.Id().ID, provider.M_UPDATERECORDS, 1) + this.handler.config.RateLimiter.Accept() + if _, err := this.handler.service.Changes.Create(projectID, zoneName, this.change).Do(); err != nil { + this.Error(err) for _, d := range this.done { if d != nil { - d.Succeeded() + d.Failed(err) } } - this.Infof("%d records in zone %s were successfully updated", len(this.change.Additions)+len(this.change.Deletions), this.zone.Id()) - return nil + return err + } + + for _, d := range this.done { + if d != nil { + d.Succeeded() + } + } + this.Infof("%d records in zone %s were successfully updated", len(this.change.Additions)+len(this.change.Deletions), this.zone.Id()) + return nil +} + +func isNotFound(err error) bool { + if ge, ok := err.(*googleapi.Error); ok { + return ge.Code == 404 } + return false } -func mapRecordSet(dnsname string, rs *dns.RecordSet) *googledns.ResourceRecordSet { +func mapRecordSet(name dns.DNSSetName, rs *dns.RecordSet, policy *googleRoutingPolicyData) *googledns.ResourceRecordSet { targets := make([]string, len(rs.Records)) for i, r := range rs.Records { if rs.Type == dns.RS_CNAME { @@ -142,10 +209,12 @@ func mapRecordSet(dnsname string, rs *dns.RecordSet) *googledns.ResourceRecordSe ttl = rs.TTL } - return &googledns.ResourceRecordSet{ - Name: dnsname, + rrset := &googledns.ResourceRecordSet{ + Name: name.DNSName, Rrdatas: targets, Ttl: ttl, Type: rs.Type, } + rrset = mapPolicyRecordSet(rrset, policy) + return rrset } diff --git a/pkg/controller/provider/google/execution_test.go b/pkg/controller/provider/google/execution_test.go new file mode 100644 index 000000000..c4ee3a633 --- /dev/null +++ b/pkg/controller/provider/google/execution_test.go @@ -0,0 +1,447 @@ +/* + * Copyright 2022 SAP SE or an SAP affiliate company. All rights reserved. This file is licensed under the Apache Software License, v. 2 except as noted otherwise in the LICENSE file + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * + */ + +package google + +import ( + "fmt" + + "github.com/gardener/controller-manager-library/pkg/logger" + "github.com/gardener/external-dns-management/pkg/dns" + "github.com/gardener/external-dns-management/pkg/dns/provider" + . "github.com/onsi/ginkgo/v2" + . "github.com/onsi/gomega" + . 
"github.com/onsi/gomega/gstruct" + "github.com/onsi/gomega/types" + googledns "google.golang.org/api/dns/v1" + "google.golang.org/api/googleapi" +) + +var _ = Describe("Execution", func() { + var ( + nameFunc = func(element interface{}) string { + return element.(*googledns.ResourceRecordSet).Name + } + + wrrStatus0 = func(name, typ string) (*googledns.ResourceRecordSet, error) { + switch name { + case "w1.example.org.": + return nil, &googleapi.Error{Code: 404} + case "w2.example.org.": + if typ == dns.RS_CNAME { + return &googledns.ResourceRecordSet{ + Name: name, + Type: typ, + RoutingPolicy: &googledns.RRSetRoutingPolicy{ + Wrr: &googledns.RRSetRoutingPolicyWrrPolicy{ + Items: []*googledns.RRSetRoutingPolicyWrrPolicyWrrPolicyItem{ + { + Rrdatas: []string{"some-other.example.org."}, + Weight: 1, + }, + { + Rrdatas: []string{rrDefaultValue(typ)}, + Weight: 0, + }, + { + Rrdatas: []string{"some.example.org."}, + Weight: 1, + }, + }, + }, + }, + }, nil + } + case "w3.example.org.": + if typ == dns.RS_TXT { + return &googledns.ResourceRecordSet{ + Name: name, + Type: typ, + RoutingPolicy: &googledns.RRSetRoutingPolicy{ + Wrr: &googledns.RRSetRoutingPolicyWrrPolicy{ + Items: []*googledns.RRSetRoutingPolicyWrrPolicyWrrPolicyItem{ + { + Rrdatas: []string{rrDefaultValue(typ)}, + Weight: 0, + }, + { + Rrdatas: []string{"\"bla\"", "\"foo\""}, + Weight: 1, + }, + }, + }, + }, + }, nil + } + } + return nil, fmt.Errorf("unexpected: %s %s", name, typ) + } + wrrStatus1 = func(name, typ string) (*googledns.ResourceRecordSet, error) { + switch name { + case "w1.example.org.": + if typ == dns.RS_A { + return &googledns.ResourceRecordSet{ + Name: name, + Type: typ, + RoutingPolicy: &googledns.RRSetRoutingPolicy{ + Wrr: &googledns.RRSetRoutingPolicyWrrPolicy{ + Items: []*googledns.RRSetRoutingPolicyWrrPolicyWrrPolicyItem{ + { + Rrdatas: []string{"4.4.4.4"}, + Weight: 4, + }, + }, + }, + }, + }, nil + } + case "w2.example.org.": + if typ == dns.RS_CNAME { + return &googledns.ResourceRecordSet{ + Name: name, + Type: typ, + RoutingPolicy: &googledns.RRSetRoutingPolicy{ + Wrr: &googledns.RRSetRoutingPolicyWrrPolicy{ + Items: []*googledns.RRSetRoutingPolicyWrrPolicyWrrPolicyItem{ + { + Rrdatas: []string{rrDefaultValue(typ)}, + Weight: 0, + }, + { + Rrdatas: []string{rrDefaultValue(typ)}, + Weight: 0, + }, + { + Rrdatas: []string{"some.example.org."}, + Weight: 1, + }, + }, + }, + }, + }, nil + } + case "w3.example.org.": + if typ == dns.RS_TXT { + return &googledns.ResourceRecordSet{ + Name: name, + Type: typ, + RoutingPolicy: &googledns.RRSetRoutingPolicy{ + Wrr: &googledns.RRSetRoutingPolicyWrrPolicy{ + Items: []*googledns.RRSetRoutingPolicyWrrPolicyWrrPolicyItem{ + { + Rrdatas: []string{"\"bar\""}, + Weight: 5, + }, + { + Rrdatas: []string{"\"bla\"", "\"foo\""}, + Weight: 1, + }, + }, + }, + }, + }, nil + } + } + return nil, fmt.Errorf("unexpected: %s %s", name, typ) + } + wrrStatus2 = func(name, typ string) (*googledns.ResourceRecordSet, error) { + switch name { + case "w1.example.org.": + if typ == dns.RS_A { + return &googledns.ResourceRecordSet{ + Name: name, + Type: typ, + RoutingPolicy: &googledns.RRSetRoutingPolicy{ + Wrr: &googledns.RRSetRoutingPolicyWrrPolicy{ + Items: []*googledns.RRSetRoutingPolicyWrrPolicyWrrPolicyItem{ + { + Rrdatas: []string{"4.4.4.4"}, + Weight: 4, + }, + { + Rrdatas: []string{rrDefaultValue(typ)}, + Weight: 0, + }, + { + Rrdatas: []string{"5.5.5.5"}, + Weight: 5, + }, + }, + }, + }, + }, nil + } + case "w4.example.org.": + if typ == dns.RS_AAAA { + return 
&googledns.ResourceRecordSet{ + Name: name, + Type: typ, + RoutingPolicy: &googledns.RRSetRoutingPolicy{ + Wrr: &googledns.RRSetRoutingPolicyWrrPolicy{ + Items: []*googledns.RRSetRoutingPolicyWrrPolicyWrrPolicyItem{ + { + Rrdatas: []string{rrDefaultValue(typ)}, + Weight: 0, + }, + { + Rrdatas: []string{rrDefaultValue(typ)}, + Weight: 0, + }, + { + Rrdatas: []string{"cef::1"}, + Weight: 1, + }, + }, + }, + }, + }, nil + } + } + return nil, fmt.Errorf("unexpected: %s %s", name, typ) + } + dnsset1 = makeDNSSet("x1.example.org", dns.RS_A, 301, "1.1.1.1") + dnsset2old = makeDNSSet("x2.example.org", dns.RS_A, 302, "1.1.1.2") + dnsset2new = makeDNSSet("x2.example.org", dns.RS_A, 303, "1.1.1.3") + dnsset4 = makeDNSSet("x4.example.org", dns.RS_A, 304, "1.1.1.4") + + dnssetwrr1_0 = makeDNSSetWrr("w1.example.org", 0, 10, dns.RS_A, "1.1.2.0") + dnssetwrr1_2 = makeDNSSetWrr("w1.example.org", 2, 12, dns.RS_A, "1.1.2.2") + dnssetwrr2_2old = makeDNSSetWrr("w2.example.org", 2, 1, dns.RS_CNAME, "some.example.org") + dnssetwrr2_2new = makeDNSSetWrr("w2.example.org", 2, 0, dns.RS_CNAME, "some.example.org") + dnssetwrr2_0new = makeDNSSetWrr("w2.example.org", 0, 0, dns.RS_CNAME, "some.example.org") + dnssetwrr3_1 = makeDNSSetWrr("w3.example.org", 1, 1, dns.RS_TXT, "bla", "foo") + dnssetwrr4_0 = makeDNSSetWrr("w4.example.org", 0, 1, dns.RS_AAAA, "a23::4") + dnssetwrr1_0b = makeDNSSetWrr("w1.example.org", 0, 4, dns.RS_A, "4.4.4.4") + dnssetwrr1_2b = makeDNSSetWrr("w1.example.org", 2, 5, dns.RS_A, "5.5.5.5") + dnssetwrr1_2bnew = makeDNSSetWrr("w1.example.org", 2, 0, dns.RS_A, "5.5.6.6") + ) + DescribeTable("Should prepare submission", func(reqs []*provider.ChangeRequest, rrsetGetter rrsetGetterFunc, changeMatcher types.GomegaMatcher) { + change, err := prepareSubmission(reqs, rrsetGetter) + if changeMatcher == nil { + Expect(err).To(HaveOccurred()) + } else { + Expect(err).To(Not(HaveOccurred())) + Expect(change).To(PointTo(changeMatcher)) + } + }, + Entry("fails for invalid index 5", + []*provider.ChangeRequest{ + {Action: provider.R_CREATE, Type: dns.RS_A, Addition: makeDNSSetWrr("w1.example.org", 5, 1, dns.RS_A, "4.4.4.4")}, + }, + nil, + nil, + ), + Entry("fails for invalid weight 0.2", + []*provider.ChangeRequest{ + {Action: provider.R_CREATE, Type: dns.RS_A, Addition: makeDNSSetWrrWrongWeight02("w1.example.org", 0, dns.RS_A, "1.1.2.0")}, + }, + nil, + nil, + ), + Entry("fails for missing weight parameter", + []*provider.ChangeRequest{ + {Action: provider.R_CREATE, Type: dns.RS_A, Addition: makeDNSSetWrrMissingWeight("w1.example.org", 0, dns.RS_A, "1.1.2.0")}, + }, + nil, + nil, + ), + Entry("prepares simple non-policy-routing change requests", + []*provider.ChangeRequest{ + {Action: provider.R_CREATE, Type: dns.RS_A, Addition: dnsset1}, + {Action: provider.R_UPDATE, Type: dns.RS_A, Addition: dnsset2new, Deletion: dnsset2old}, + {Action: provider.R_DELETE, Type: dns.RS_A, Deletion: dnsset4}, + }, + nil, + MatchFields(IgnoreExtras, Fields{ + "Deletions": MatchAllElements(nameFunc, Elements{ + "x2.example.org.": matchSimpleResourceRecordSet(dns.RS_A, 302, "1.1.1.2"), + "x4.example.org.": matchSimpleResourceRecordSet(dns.RS_A, 304, "1.1.1.4"), + }), + "Additions": MatchAllElements(nameFunc, Elements{ + "x1.example.org.": matchSimpleResourceRecordSet(dns.RS_A, 301, "1.1.1.1"), + "x2.example.org.": matchSimpleResourceRecordSet(dns.RS_A, 303, "1.1.1.3"), + }), + }), + ), + Entry("prepares weighted policy-routing change requests", + []*provider.ChangeRequest{ + {Action: provider.R_CREATE, Type: dns.RS_A, Addition: 
dnssetwrr1_0}, + {Action: provider.R_CREATE, Type: dns.RS_A, Addition: dnssetwrr1_2}, + {Action: provider.R_UPDATE, Type: dns.RS_CNAME, Addition: dnssetwrr2_2new, Deletion: dnssetwrr2_2old}, + {Action: provider.R_DELETE, Type: dns.RS_TXT, Deletion: dnssetwrr3_1}, + }, + wrrStatus0, + MatchFields(IgnoreExtras, Fields{ + "Deletions": MatchAllElements(nameFunc, Elements{ + "w2.example.org.": matchWrrResourceRecordSet(dns.RS_CNAME, matchWrrItem(1, "some-other.example.org."), matchWrrPlaceholderItem(dns.RS_CNAME), matchWrrItem(1, "some.example.org.")), + "w3.example.org.": matchWrrResourceRecordSet(dns.RS_TXT, matchWrrPlaceholderItem(dns.RS_TXT), matchWrrItem(1, "\"bla\"", "\"foo\"")), + }), + "Additions": MatchAllElements(nameFunc, Elements{ + "w1.example.org.": matchWrrResourceRecordSet(dns.RS_A, matchWrrItem(10, "1.1.2.0"), matchWrrPlaceholderItem(dns.RS_A), matchWrrItem(12, "1.1.2.2")), + "w2.example.org.": matchWrrResourceRecordSet(dns.RS_CNAME, matchWrrItem(1, "some-other.example.org."), matchWrrPlaceholderItem(dns.RS_CNAME), matchWrrItem(0, "some.example.org.")), + }), + }), + ), + Entry("prepares weighted policy-routing change requests (merging)", + []*provider.ChangeRequest{ + {Action: provider.R_CREATE, Type: dns.RS_A, Addition: dnssetwrr1_2}, + {Action: provider.R_DELETE, Type: dns.RS_CNAME, Deletion: dnssetwrr2_2old}, + {Action: provider.R_CREATE, Type: dns.RS_CNAME, Addition: dnssetwrr2_0new}, + {Action: provider.R_DELETE, Type: dns.RS_TXT, Deletion: dnssetwrr3_1}, + }, + wrrStatus1, + MatchFields(IgnoreExtras, Fields{ + "Deletions": MatchAllElements(nameFunc, Elements{ + "w1.example.org.": matchWrrResourceRecordSet(dns.RS_A, matchWrrItem(4, "4.4.4.4")), + "w2.example.org.": matchWrrResourceRecordSet(dns.RS_CNAME, matchWrrPlaceholderItem(dns.RS_CNAME), matchWrrPlaceholderItem(dns.RS_CNAME), matchWrrItem(1, "some.example.org.")), + "w3.example.org.": matchWrrResourceRecordSet(dns.RS_TXT, matchWrrItem(5, "\"bar\""), matchWrrItem(1, "\"bla\"", "\"foo\"")), + }), + "Additions": MatchAllElements(nameFunc, Elements{ + "w1.example.org.": matchWrrResourceRecordSet(dns.RS_A, matchWrrItem(4, "4.4.4.4"), matchWrrPlaceholderItem(dns.RS_A), matchWrrItem(12, "1.1.2.2")), + "w2.example.org.": matchWrrResourceRecordSet(dns.RS_CNAME, matchWrrItem(0, "some.example.org.")), + "w3.example.org.": matchWrrResourceRecordSet(dns.RS_TXT, matchWrrItem(5, "\"bar\"")), + }), + }), + ), + Entry("prepares weighted policy-routing change requests (merging2)", + []*provider.ChangeRequest{ + {Action: provider.R_CREATE, Type: dns.RS_AAAA, Addition: dnssetwrr4_0}, + {Action: provider.R_DELETE, Type: dns.RS_A, Deletion: dnssetwrr1_0b}, + {Action: provider.R_UPDATE, Type: dns.RS_A, Addition: dnssetwrr1_2bnew, Deletion: dnssetwrr1_2b}, + }, + wrrStatus2, + MatchFields(IgnoreExtras, Fields{ + "Deletions": MatchAllElements(nameFunc, Elements{ + "w1.example.org.": matchWrrResourceRecordSet(dns.RS_A, matchWrrItem(4, "4.4.4.4"), matchWrrPlaceholderItem(dns.RS_A), matchWrrItem(5, "5.5.5.5")), + "w4.example.org.": matchWrrResourceRecordSet(dns.RS_AAAA, matchWrrPlaceholderItem(dns.RS_AAAA), matchWrrPlaceholderItem(dns.RS_AAAA), matchWrrItem(1, "cef::1")), + }), + "Additions": MatchAllElements(nameFunc, Elements{ + "w1.example.org.": matchWrrResourceRecordSet(dns.RS_A, matchWrrPlaceholderItem(dns.RS_A), matchWrrPlaceholderItem(dns.RS_A), matchWrrItem(0, "5.5.6.6")), + "w4.example.org.": matchWrrResourceRecordSet(dns.RS_AAAA, matchWrrItem(1, "a23::4"), matchWrrPlaceholderItem(dns.RS_AAAA), matchWrrItem(1, "cef::1")), + }), + }), 
+ ), + ) +}) + +func makeDNSSet(dnsName, typ string, ttl int64, targets ...string) *dns.DNSSet { + set := dns.NewDNSSet(dns.DNSSetName{DNSName: dnsName}, nil) + set.SetRecordSet(typ, ttl, targets...) + return set +} + +func makeDNSSetWrr(dnsName string, index, weight int, typ string, targets ...string) *dns.DNSSet { + policy := &dns.RoutingPolicy{ + Type: dns.RoutingPolicyWeighted, + Parameters: map[string]string{"weight": fmt.Sprintf("%d", weight)}, + } + set := dns.NewDNSSet(dns.DNSSetName{DNSName: dnsName, SetIdentifier: fmt.Sprintf("%d", index)}, policy) + set.SetRecordSet(typ, 300, targets...) + return set +} + +func makeDNSSetWrrWrongWeight02(dnsName string, index int, typ string, targets ...string) *dns.DNSSet { + set := makeDNSSetWrr(dnsName, index, 0, typ, targets...) + set.RoutingPolicy.Parameters["weight"] = "0.2" + return set +} + +func makeDNSSetWrrMissingWeight(dnsName string, index int, typ string, targets ...string) *dns.DNSSet { + set := makeDNSSetWrr(dnsName, index, 0, typ, targets...) + delete(set.RoutingPolicy.Parameters, "weight") + return set +} + +func matchSimpleResourceRecordSet(typ string, ttl int64, targets ...string) types.GomegaMatcher { + return PointTo(MatchFields(IgnoreExtras, Fields{ + "Type": Equal(typ), + "Ttl": Equal(ttl), + "Rrdatas": Equal(targets), + })) +} + +func itemNameFunc(index int, element interface{}) string { + return fmt.Sprintf("%d", index) +} + +func matchWrrResourceRecordSet(typ string, items ...types.GomegaMatcher) types.GomegaMatcher { + elements := Elements{} + for i, item := range items { + elements[fmt.Sprintf("%d", i)] = item + } + + return PointTo(MatchFields(IgnoreExtras, Fields{ + "Type": Equal(typ), + "RoutingPolicy": PointTo(MatchFields(IgnoreExtras, Fields{ + "Wrr": PointTo(MatchFields(IgnoreExtras, Fields{ + "Items": MatchAllElementsWithIndex(itemNameFunc, elements), + })), + })), + })) +} + +func matchWrrItem(weight int, targets ...string) types.GomegaMatcher { + return PointTo(MatchFields(IgnoreExtras, Fields{ + "Weight": Equal(float64(weight)), + "Rrdatas": Equal(targets), + })) +} + +func matchWrrPlaceholderItem(typ string) types.GomegaMatcher { + return PointTo(MatchFields(IgnoreExtras, Fields{ + "Weight": Equal(float64(0)), + "Rrdatas": Equal([]string{rrDefaultValue(typ)}), + })) +} + +func prepareSubmission(reqs []*provider.ChangeRequest, rrsetGetter rrsetGetterFunc) (*googledns.Change, error) { + log := logger.NewContext("", "TestEnv") + zone := provider.NewDNSHostedZone(TYPE_CODE, "test", "example.org", "", nil, false) + doneHandler := &testDoneHandler{} + exec := NewExecution(log, nil, zone) + for _, r := range reqs { + r.Done = doneHandler + exec.addChange(r) + } + if doneHandler.invalidCount > 0 { + return nil, fmt.Errorf("errors: %d, last message: %s", doneHandler.invalidCount, doneHandler.lastMessage) + } + if err := exec.prepareSubmission(rrsetGetter); err != nil { + return nil, err + } + return exec.change, nil +} + +type testDoneHandler struct { + invalidCount int + failedCount int + lastMessage string +} + +var _ provider.DoneHandler = &testDoneHandler{} + +func (h *testDoneHandler) SetInvalid(err error) { + h.invalidCount++ + h.lastMessage = err.Error() +} + +func (h *testDoneHandler) Failed(err error) { + h.failedCount++ + h.lastMessage = err.Error() +} + +func (h *testDoneHandler) Throttled() {} +func (h *testDoneHandler) Succeeded() {} diff --git a/pkg/controller/provider/google/google_suite_test.go b/pkg/controller/provider/google/google_suite_test.go new file mode 100644 index 000000000..19421b2f6 --- 
/dev/null +++ b/pkg/controller/provider/google/google_suite_test.go @@ -0,0 +1,30 @@ +/* + * Copyright 2022 SAP SE or an SAP affiliate company. All rights reserved. This file is licensed under the Apache Software License, v. 2 except as noted otherwise in the LICENSE file + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * + */ + +package google + +import ( + "testing" + + . "github.com/onsi/ginkgo/v2" + . "github.com/onsi/gomega" +) + +func TestIntegration(t *testing.T) { + RegisterFailHandler(Fail) + + RunSpecs(t, "Google CloudDNS Suite") +} diff --git a/pkg/controller/provider/google/handler.go b/pkg/controller/provider/google/handler.go index c3c600b65..4df9fe72b 100644 --- a/pkg/controller/provider/google/handler.go +++ b/pkg/controller/provider/google/handler.go @@ -20,6 +20,7 @@ import ( "context" "fmt" "net/http" + "strconv" "strings" "k8s.io/client-go/util/flowcontrol" @@ -47,6 +48,8 @@ type Handler struct { rateLimiter flowcontrol.RateLimiter } +const epsilon = 0.00001 + var _ provider.DNSHandler = &Handler{} func NewHandler(config *provider.DNSHandlerConfig) (provider.DNSHandler, error) { @@ -178,11 +181,31 @@ func (h *Handler) getZoneState(zone provider.DNSHostedZone, cache provider.ZoneC f := func(r *googledns.ResourceRecordSet) { if dns.SupportedRecordType(r.Type) { - rs := dns.NewRecordSet(r.Type, r.Ttl, nil) - for _, rr := range r.Rrdatas { - rs.Add(&dns.Record{Value: rr}) + if len(r.Rrdatas) > 0 { + rs := dns.NewRecordSet(r.Type, r.Ttl, nil) + for _, rr := range r.Rrdatas { + rs.Add(&dns.Record{Value: rr}) + } + dnssets.AddRecordSetFromProvider(r.Name, rs) + } else if r.RoutingPolicy != nil && r.RoutingPolicy.Wrr != nil { + for _, item := range r.RoutingPolicy.Wrr.Items { + if int64(item.Weight+epsilon)*10 != int64(item.Weight*10+epsilon) { + return // foreign as managed recordsets only use integral weights + } + } + for i, item := range r.RoutingPolicy.Wrr.Items { + if isWrrPlaceHolderItem(r.Type, item) { + continue + } + rs := dns.NewRecordSet(r.Type, r.Ttl, nil) + for _, rr := range item.Rrdatas { + rs.Add(&dns.Record{Value: rr}) + } + dnsSetName := dns.DNSSetName{DNSName: r.Name, SetIdentifier: fmt.Sprintf("%d", i)} + policy := dns.NewRoutingPolicy(dns.RoutingPolicyWeighted, "weight", strconv.FormatInt(int64(item.Weight+epsilon), 10)) + dnssets.AddRecordSetFromProviderEx(dnsSetName, policy, rs) + } } - dnssets.AddRecordSetFromProvider(r.Name, rs) } } @@ -211,7 +234,7 @@ func (h *Handler) executeRequests(logger logger.LogContext, zone provider.DNSHos exec.addChange(r) } if h.config.DryRun { - logger.Infof("no changes in dryrun mode for AWS") + logger.Infof("no changes in dryrun mode for Google") return nil } return exec.submitChanges(h.config.Metrics) @@ -221,6 +244,12 @@ func (h *Handler) makeZoneID(name string) string { return fmt.Sprintf("%s/%s", h.credentials.ProjectID, name) } +func (h *Handler) getResourceRecordSet(project, managedZone, name, typ string) (*googledns.ResourceRecordSet, error) { + h.config.RateLimiter.Accept() + h.config.Metrics.AddGenericRequests("getrecordset", 1) + return 
h.service.ResourceRecordSets.Get(project, managedZone, name, typ).Do() +} + // SplitZoneID splits the zone id into project id and zone name func SplitZoneID(id string) (string, string) { parts := strings.SplitN(id, "/", 2) diff --git a/pkg/controller/provider/google/routingpolicy.go b/pkg/controller/provider/google/routingpolicy.go new file mode 100644 index 000000000..61f7788bd --- /dev/null +++ b/pkg/controller/provider/google/routingpolicy.go @@ -0,0 +1,225 @@ +/* + * Copyright 2019 SAP SE or an SAP affiliate company. All rights reserved. This file is licensed under the Apache Software License, v. 2 except as noted otherwise in the LICENSE file + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * + */ + +package google + +import ( + "bytes" + "fmt" + "strconv" + "strings" + + "github.com/gardener/external-dns-management/pkg/dns" + googledns "google.golang.org/api/dns/v1" +) + +const routingPolicyMaxIndices = 5 + +type googleRoutingPolicyData struct { + index int + weight int64 +} + +type dnsname = string +type dnstype = string + +type routingPolicyChanges map[dnsname]map[dnstype]*googledns.ResourceRecordSet + +type rrsetGetterFunc func(name, typ string) (*googledns.ResourceRecordSet, error) + +var _deleted_marker_ = &googledns.RRSetRoutingPolicyWrrPolicyWrrPolicyItem{} + +func (c routingPolicyChanges) addChange(set *googledns.ResourceRecordSet, add bool) { + perType := c[set.Name] + if perType == nil { + perType = map[dnstype]*googledns.ResourceRecordSet{} + } + current := perType[set.Type] + if current == nil { + current = &googledns.ResourceRecordSet{ + Name: set.Name, + RoutingPolicy: &googledns.RRSetRoutingPolicy{ + Wrr: &googledns.RRSetRoutingPolicyWrrPolicy{}, + }, + Ttl: set.Ttl, + Type: set.Type, + } + } + index := len(set.RoutingPolicy.Wrr.Items) - 1 + for len(current.RoutingPolicy.Wrr.Items) <= index { + current.RoutingPolicy.Wrr.Items = append(current.RoutingPolicy.Wrr.Items, nil) + } + if add { + current.RoutingPolicy.Wrr.Items[index] = set.RoutingPolicy.Wrr.Items[index] + } else { + current.RoutingPolicy.Wrr.Items[index] = _deleted_marker_ + } + perType[set.Type] = current + c[set.Name] = perType +} + +func (c routingPolicyChanges) calcDeletionsAndAdditions(rrsetGetter rrsetGetterFunc) (deletions []*googledns.ResourceRecordSet, additions []*googledns.ResourceRecordSet, err error) { + for name, perType := range c { + for typ, rrset := range perType { + old, err2 := rrsetGetter(name, typ) + if err2 == nil { + deletions = append(deletions, old) + for i, item := range old.RoutingPolicy.Wrr.Items { + if i < len(rrset.RoutingPolicy.Wrr.Items) { + if rrset.RoutingPolicy.Wrr.Items[i] == nil { + rrset.RoutingPolicy.Wrr.Items[i] = item + } + } else { + rrset.RoutingPolicy.Wrr.Items = append(rrset.RoutingPolicy.Wrr.Items, item) + } + } + } else if !isNotFound(err2) { + err = err2 + return + } + + max := len(rrset.RoutingPolicy.Wrr.Items) - 1 + for i := len(rrset.RoutingPolicy.Wrr.Items) - 1; i >= 0; i-- { + if rrset.RoutingPolicy.Wrr.Items[i] == _deleted_marker_ { + if max == i { + rrset.RoutingPolicy.Wrr.Items 
= rrset.RoutingPolicy.Wrr.Items[:i] + max = i - 1 + } else { + rrset.RoutingPolicy.Wrr.Items[i] = createWrrPlaceHolderItem(typ) + } + } else if rrset.RoutingPolicy.Wrr.Items[i] == nil { + rrset.RoutingPolicy.Wrr.Items[i] = createWrrPlaceHolderItem(typ) + } else if max == i && isWrrPlaceHolderItem(typ, rrset.RoutingPolicy.Wrr.Items[i]) { + rrset.RoutingPolicy.Wrr.Items = rrset.RoutingPolicy.Wrr.Items[:i] + max = i - 1 + } + } + if len(rrset.RoutingPolicy.Wrr.Items) > 0 { + additions = append(additions, rrset) + } + } + } + err = nil + return +} + +func extractRoutingPolicy(set *dns.DNSSet) (*googleRoutingPolicyData, error) { + if set.Name.SetIdentifier == "" && set.RoutingPolicy == nil { + return nil, nil + } + if set.Name.SetIdentifier == "" { + return nil, fmt.Errorf("missing set identifier") + } + if set.RoutingPolicy == nil { + return nil, fmt.Errorf("missing routing policy") + } + index, err := strconv.Atoi(set.Name.SetIdentifier) + if index < 0 || index >= routingPolicyMaxIndices || err != nil { + return nil, fmt.Errorf("For %s, the setIdentifier must be an number >= 0 and < %d, but got: %s", TYPE_CODE, routingPolicyMaxIndices, set.Name.SetIdentifier) + } + var keys []string + switch set.RoutingPolicy.Type { + case dns.RoutingPolicyWeighted: + keys = []string{"weight"} + default: + return nil, fmt.Errorf("unsupported routing policy: %s", set.RoutingPolicy.Type) + } + + if err := set.RoutingPolicy.CheckParameterKeys(keys); err != nil { + return nil, err + } + + var weight int64 + for key, value := range set.RoutingPolicy.Parameters { + switch key { + case "weight": + v, err := strconv.ParseInt(value, 10, 64) + if err != nil || v < 0 { + return nil, fmt.Errorf("invalid value for spec.routingPolicy.parameters.weight: %s (only non-negative integers are allowed)", value) + } + weight = v + } + } + + return &googleRoutingPolicyData{ + index: index, + weight: weight, + }, nil +} + +func mapPolicyRecordSet(rrset *googledns.ResourceRecordSet, data *googleRoutingPolicyData) *googledns.ResourceRecordSet { + if data == nil { + return rrset + } + + items := make([]*googledns.RRSetRoutingPolicyWrrPolicyWrrPolicyItem, data.index+1) + items[data.index] = &googledns.RRSetRoutingPolicyWrrPolicyWrrPolicyItem{ + Rrdatas: rrset.Rrdatas, + Weight: float64(data.weight), + } + + return &googledns.ResourceRecordSet{ + Name: rrset.Name, + RoutingPolicy: &googledns.RRSetRoutingPolicy{ + Wrr: &googledns.RRSetRoutingPolicyWrrPolicy{ + Items: items, + }, + }, + Ttl: rrset.Ttl, + Type: rrset.Type, + } +} + +func describeRoutingPolicy(rrset *googledns.ResourceRecordSet) string { + if rrset.RoutingPolicy == nil || rrset.RoutingPolicy.Wrr == nil { + return "" + } + buf := new(bytes.Buffer) + for i, item := range rrset.RoutingPolicy.Wrr.Items { + if !isWrrPlaceHolderItem(rrset.Type, item) { + buf.WriteString(fmt.Sprintf("[%d]%.1f:%s;", i, item.Weight, strings.Join(item.Rrdatas, ","))) + } + } + return buf.String() +} + +func createWrrPlaceHolderItem(typ string) *googledns.RRSetRoutingPolicyWrrPolicyWrrPolicyItem { + return &googledns.RRSetRoutingPolicyWrrPolicyWrrPolicyItem{ + Rrdatas: []string{rrDefaultValue(typ)}, + Weight: 0, + } +} + +func isWrrPlaceHolderItem(typ string, item *googledns.RRSetRoutingPolicyWrrPolicyWrrPolicyItem) bool { + return item.Weight == 0 && len(item.Rrdatas) == 1 && item.Rrdatas[0] == rrDefaultValue(typ) +} + +func rrDefaultValue(typ string) string { + switch typ { + case dns.RS_TXT: + return "\"__dummy__\"" + case dns.RS_A: + // use dummy documentation IP address + return "233.252.0.1" + 
case dns.RS_CNAME: + return "dummy.dummy.dummy.com." + case dns.RS_AAAA: + // use dummy documentation IP address + return "2001:db8::1" + default: + return typ + "?" + } +} diff --git a/pkg/dns/dnsset.go b/pkg/dns/dnsset.go index e13dca594..d07a81091 100644 --- a/pkg/dns/dnsset.go +++ b/pkg/dns/dnsset.go @@ -78,6 +78,12 @@ func (dnssets DNSSets) AddRecordSet(name DNSSetName, policy *RoutingPolicy, rs * dnssets[name] = dnsset } dnsset.Sets[rs.Type] = rs + if rs.Type == RS_CNAME { + for i := range rs.Records { + rs.Records[i].Value = NormalizeHostname(rs.Records[i].Value) + } + } + dnsset.RoutingPolicy = policy } func (dnssets DNSSets) RemoveRecordSet(name DNSSetName, recordSetType string) { diff --git a/test/functional/routingpolicies.go b/test/functional/routingpolicies.go index 18a9c1316..673757a73 100644 --- a/test/functional/routingpolicies.go +++ b/test/functional/routingpolicies.go @@ -70,7 +70,7 @@ spec: {{ end }} routingPolicy: type: {{$policy.Type}} - setIdentifier: {{$id}} + setIdentifier: '{{$id}}' parameters: {{ range $pk, $pv := $policy.Parameters }} {{$pk}}: '{{$pv}}' diff --git a/vendor/cloud.google.com/go/LICENSE b/vendor/cloud.google.com/go/compute/LICENSE similarity index 100% rename from vendor/cloud.google.com/go/LICENSE rename to vendor/cloud.google.com/go/compute/LICENSE diff --git a/vendor/cloud.google.com/go/compute/metadata/metadata.go b/vendor/cloud.google.com/go/compute/metadata/metadata.go index 5dbe77cc7..1405d0967 100644 --- a/vendor/cloud.google.com/go/compute/metadata/metadata.go +++ b/vendor/cloud.google.com/go/compute/metadata/metadata.go @@ -16,7 +16,7 @@ // metadata and API service accounts. // // This package is a wrapper around the GCE metadata service, -// as documented at https://developers.google.com/compute/docs/metadata. +// as documented at https://cloud.google.com/compute/docs/metadata/overview. package metadata // import "cloud.google.com/go/compute/metadata" import ( @@ -61,14 +61,18 @@ var ( instID = &cachedValue{k: "instance/id", trim: true} ) -var defaultClient = &Client{hc: &http.Client{ - Transport: &http.Transport{ - Dial: (&net.Dialer{ - Timeout: 2 * time.Second, - KeepAlive: 30 * time.Second, - }).Dial, - }, -}} +var defaultClient = &Client{hc: newDefaultHTTPClient()} + +func newDefaultHTTPClient() *http.Client { + return &http.Client{ + Transport: &http.Transport{ + Dial: (&net.Dialer{ + Timeout: 2 * time.Second, + KeepAlive: 30 * time.Second, + }).Dial, + }, + } +} // NotDefinedError is returned when requested metadata is not defined. // @@ -130,7 +134,7 @@ func testOnGCE() bool { go func() { req, _ := http.NewRequest("GET", "http://"+metadataIP, nil) req.Header.Set("User-Agent", userAgent) - res, err := defaultClient.hc.Do(req.WithContext(ctx)) + res, err := newDefaultHTTPClient().Do(req.WithContext(ctx)) if err != nil { resc <- false return @@ -140,7 +144,8 @@ func testOnGCE() bool { }() go func() { - addrs, err := net.DefaultResolver.LookupHost(ctx, "metadata.google.internal") + resolver := &net.Resolver{} + addrs, err := resolver.LookupHost(ctx, "metadata.google.internal") if err != nil || len(addrs) == 0 { resc <- false return diff --git a/vendor/github.com/golang/protobuf/jsonpb/decode.go b/vendor/github.com/golang/protobuf/jsonpb/decode.go new file mode 100644 index 000000000..60e82caa9 --- /dev/null +++ b/vendor/github.com/golang/protobuf/jsonpb/decode.go @@ -0,0 +1,524 @@ +// Copyright 2015 The Go Authors. All rights reserved. 
+// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package jsonpb + +import ( + "encoding/json" + "errors" + "fmt" + "io" + "math" + "reflect" + "strconv" + "strings" + "time" + + "github.com/golang/protobuf/proto" + "google.golang.org/protobuf/encoding/protojson" + protoV2 "google.golang.org/protobuf/proto" + "google.golang.org/protobuf/reflect/protoreflect" + "google.golang.org/protobuf/reflect/protoregistry" +) + +const wrapJSONUnmarshalV2 = false + +// UnmarshalNext unmarshals the next JSON object from d into m. +func UnmarshalNext(d *json.Decoder, m proto.Message) error { + return new(Unmarshaler).UnmarshalNext(d, m) +} + +// Unmarshal unmarshals a JSON object from r into m. +func Unmarshal(r io.Reader, m proto.Message) error { + return new(Unmarshaler).Unmarshal(r, m) +} + +// UnmarshalString unmarshals a JSON object from s into m. +func UnmarshalString(s string, m proto.Message) error { + return new(Unmarshaler).Unmarshal(strings.NewReader(s), m) +} + +// Unmarshaler is a configurable object for converting from a JSON +// representation to a protocol buffer object. +type Unmarshaler struct { + // AllowUnknownFields specifies whether to allow messages to contain + // unknown JSON fields, as opposed to failing to unmarshal. + AllowUnknownFields bool + + // AnyResolver is used to resolve the google.protobuf.Any well-known type. + // If unset, the global registry is used by default. + AnyResolver AnyResolver +} + +// JSONPBUnmarshaler is implemented by protobuf messages that customize the way +// they are unmarshaled from JSON. Messages that implement this should also +// implement JSONPBMarshaler so that the custom format can be produced. +// +// The JSON unmarshaling must follow the JSON to proto specification: +// https://developers.google.com/protocol-buffers/docs/proto3#json +// +// Deprecated: Custom types should implement protobuf reflection instead. +type JSONPBUnmarshaler interface { + UnmarshalJSONPB(*Unmarshaler, []byte) error +} + +// Unmarshal unmarshals a JSON object from r into m. +func (u *Unmarshaler) Unmarshal(r io.Reader, m proto.Message) error { + return u.UnmarshalNext(json.NewDecoder(r), m) +} + +// UnmarshalNext unmarshals the next JSON object from d into m. +func (u *Unmarshaler) UnmarshalNext(d *json.Decoder, m proto.Message) error { + if m == nil { + return errors.New("invalid nil message") + } + + // Parse the next JSON object from the stream. + raw := json.RawMessage{} + if err := d.Decode(&raw); err != nil { + return err + } + + // Check for custom unmarshalers first since they may not properly + // implement protobuf reflection that the logic below relies on. + if jsu, ok := m.(JSONPBUnmarshaler); ok { + return jsu.UnmarshalJSONPB(u, raw) + } + + mr := proto.MessageReflect(m) + + // NOTE: For historical reasons, a top-level null is treated as a noop. + // This is incorrect, but kept for compatibility. + if string(raw) == "null" && mr.Descriptor().FullName() != "google.protobuf.Value" { + return nil + } + + if wrapJSONUnmarshalV2 { + // NOTE: If input message is non-empty, we need to preserve merge semantics + // of the old jsonpb implementation. These semantics are not supported by + // the protobuf JSON specification. + isEmpty := true + mr.Range(func(protoreflect.FieldDescriptor, protoreflect.Value) bool { + isEmpty = false // at least one iteration implies non-empty + return false + }) + if !isEmpty { + // Perform unmarshaling into a newly allocated, empty message. 
+ mr = mr.New() + + // Use a defer to copy all unmarshaled fields into the original message. + dst := proto.MessageReflect(m) + defer mr.Range(func(fd protoreflect.FieldDescriptor, v protoreflect.Value) bool { + dst.Set(fd, v) + return true + }) + } + + // Unmarshal using the v2 JSON unmarshaler. + opts := protojson.UnmarshalOptions{ + DiscardUnknown: u.AllowUnknownFields, + } + if u.AnyResolver != nil { + opts.Resolver = anyResolver{u.AnyResolver} + } + return opts.Unmarshal(raw, mr.Interface()) + } else { + if err := u.unmarshalMessage(mr, raw); err != nil { + return err + } + return protoV2.CheckInitialized(mr.Interface()) + } +} + +func (u *Unmarshaler) unmarshalMessage(m protoreflect.Message, in []byte) error { + md := m.Descriptor() + fds := md.Fields() + + if jsu, ok := proto.MessageV1(m.Interface()).(JSONPBUnmarshaler); ok { + return jsu.UnmarshalJSONPB(u, in) + } + + if string(in) == "null" && md.FullName() != "google.protobuf.Value" { + return nil + } + + switch wellKnownType(md.FullName()) { + case "Any": + var jsonObject map[string]json.RawMessage + if err := json.Unmarshal(in, &jsonObject); err != nil { + return err + } + + rawTypeURL, ok := jsonObject["@type"] + if !ok { + return errors.New("Any JSON doesn't have '@type'") + } + typeURL, err := unquoteString(string(rawTypeURL)) + if err != nil { + return fmt.Errorf("can't unmarshal Any's '@type': %q", rawTypeURL) + } + m.Set(fds.ByNumber(1), protoreflect.ValueOfString(typeURL)) + + var m2 protoreflect.Message + if u.AnyResolver != nil { + mi, err := u.AnyResolver.Resolve(typeURL) + if err != nil { + return err + } + m2 = proto.MessageReflect(mi) + } else { + mt, err := protoregistry.GlobalTypes.FindMessageByURL(typeURL) + if err != nil { + if err == protoregistry.NotFound { + return fmt.Errorf("could not resolve Any message type: %v", typeURL) + } + return err + } + m2 = mt.New() + } + + if wellKnownType(m2.Descriptor().FullName()) != "" { + rawValue, ok := jsonObject["value"] + if !ok { + return errors.New("Any JSON doesn't have 'value'") + } + if err := u.unmarshalMessage(m2, rawValue); err != nil { + return fmt.Errorf("can't unmarshal Any nested proto %v: %v", typeURL, err) + } + } else { + delete(jsonObject, "@type") + rawJSON, err := json.Marshal(jsonObject) + if err != nil { + return fmt.Errorf("can't generate JSON for Any's nested proto to be unmarshaled: %v", err) + } + if err = u.unmarshalMessage(m2, rawJSON); err != nil { + return fmt.Errorf("can't unmarshal Any nested proto %v: %v", typeURL, err) + } + } + + rawWire, err := protoV2.Marshal(m2.Interface()) + if err != nil { + return fmt.Errorf("can't marshal proto %v into Any.Value: %v", typeURL, err) + } + m.Set(fds.ByNumber(2), protoreflect.ValueOfBytes(rawWire)) + return nil + case "BoolValue", "BytesValue", "StringValue", + "Int32Value", "UInt32Value", "FloatValue", + "Int64Value", "UInt64Value", "DoubleValue": + fd := fds.ByNumber(1) + v, err := u.unmarshalValue(m.NewField(fd), in, fd) + if err != nil { + return err + } + m.Set(fd, v) + return nil + case "Duration": + v, err := unquoteString(string(in)) + if err != nil { + return err + } + d, err := time.ParseDuration(v) + if err != nil { + return fmt.Errorf("bad Duration: %v", err) + } + + sec := d.Nanoseconds() / 1e9 + nsec := d.Nanoseconds() % 1e9 + m.Set(fds.ByNumber(1), protoreflect.ValueOfInt64(int64(sec))) + m.Set(fds.ByNumber(2), protoreflect.ValueOfInt32(int32(nsec))) + return nil + case "Timestamp": + v, err := unquoteString(string(in)) + if err != nil { + return err + } + t, err := 
time.Parse(time.RFC3339Nano, v) + if err != nil { + return fmt.Errorf("bad Timestamp: %v", err) + } + + sec := t.Unix() + nsec := t.Nanosecond() + m.Set(fds.ByNumber(1), protoreflect.ValueOfInt64(int64(sec))) + m.Set(fds.ByNumber(2), protoreflect.ValueOfInt32(int32(nsec))) + return nil + case "Value": + switch { + case string(in) == "null": + m.Set(fds.ByNumber(1), protoreflect.ValueOfEnum(0)) + case string(in) == "true": + m.Set(fds.ByNumber(4), protoreflect.ValueOfBool(true)) + case string(in) == "false": + m.Set(fds.ByNumber(4), protoreflect.ValueOfBool(false)) + case hasPrefixAndSuffix('"', in, '"'): + s, err := unquoteString(string(in)) + if err != nil { + return fmt.Errorf("unrecognized type for Value %q", in) + } + m.Set(fds.ByNumber(3), protoreflect.ValueOfString(s)) + case hasPrefixAndSuffix('[', in, ']'): + v := m.Mutable(fds.ByNumber(6)) + return u.unmarshalMessage(v.Message(), in) + case hasPrefixAndSuffix('{', in, '}'): + v := m.Mutable(fds.ByNumber(5)) + return u.unmarshalMessage(v.Message(), in) + default: + f, err := strconv.ParseFloat(string(in), 0) + if err != nil { + return fmt.Errorf("unrecognized type for Value %q", in) + } + m.Set(fds.ByNumber(2), protoreflect.ValueOfFloat64(f)) + } + return nil + case "ListValue": + var jsonArray []json.RawMessage + if err := json.Unmarshal(in, &jsonArray); err != nil { + return fmt.Errorf("bad ListValue: %v", err) + } + + lv := m.Mutable(fds.ByNumber(1)).List() + for _, raw := range jsonArray { + ve := lv.NewElement() + if err := u.unmarshalMessage(ve.Message(), raw); err != nil { + return err + } + lv.Append(ve) + } + return nil + case "Struct": + var jsonObject map[string]json.RawMessage + if err := json.Unmarshal(in, &jsonObject); err != nil { + return fmt.Errorf("bad StructValue: %v", err) + } + + mv := m.Mutable(fds.ByNumber(1)).Map() + for key, raw := range jsonObject { + kv := protoreflect.ValueOf(key).MapKey() + vv := mv.NewValue() + if err := u.unmarshalMessage(vv.Message(), raw); err != nil { + return fmt.Errorf("bad value in StructValue for key %q: %v", key, err) + } + mv.Set(kv, vv) + } + return nil + } + + var jsonObject map[string]json.RawMessage + if err := json.Unmarshal(in, &jsonObject); err != nil { + return err + } + + // Handle known fields. + for i := 0; i < fds.Len(); i++ { + fd := fds.Get(i) + if fd.IsWeak() && fd.Message().IsPlaceholder() { + continue // weak reference is not linked in + } + + // Search for any raw JSON value associated with this field. + var raw json.RawMessage + name := string(fd.Name()) + if fd.Kind() == protoreflect.GroupKind { + name = string(fd.Message().Name()) + } + if v, ok := jsonObject[name]; ok { + delete(jsonObject, name) + raw = v + } + name = string(fd.JSONName()) + if v, ok := jsonObject[name]; ok { + delete(jsonObject, name) + raw = v + } + + field := m.NewField(fd) + // Unmarshal the field value. + if raw == nil || (string(raw) == "null" && !isSingularWellKnownValue(fd) && !isSingularJSONPBUnmarshaler(field, fd)) { + continue + } + v, err := u.unmarshalValue(field, raw, fd) + if err != nil { + return err + } + m.Set(fd, v) + } + + // Handle extension fields. + for name, raw := range jsonObject { + if !strings.HasPrefix(name, "[") || !strings.HasSuffix(name, "]") { + continue + } + + // Resolve the extension field by name. 
+ xname := protoreflect.FullName(name[len("[") : len(name)-len("]")]) + xt, _ := protoregistry.GlobalTypes.FindExtensionByName(xname) + if xt == nil && isMessageSet(md) { + xt, _ = protoregistry.GlobalTypes.FindExtensionByName(xname.Append("message_set_extension")) + } + if xt == nil { + continue + } + delete(jsonObject, name) + fd := xt.TypeDescriptor() + if fd.ContainingMessage().FullName() != m.Descriptor().FullName() { + return fmt.Errorf("extension field %q does not extend message %q", xname, m.Descriptor().FullName()) + } + + field := m.NewField(fd) + // Unmarshal the field value. + if raw == nil || (string(raw) == "null" && !isSingularWellKnownValue(fd) && !isSingularJSONPBUnmarshaler(field, fd)) { + continue + } + v, err := u.unmarshalValue(field, raw, fd) + if err != nil { + return err + } + m.Set(fd, v) + } + + if !u.AllowUnknownFields && len(jsonObject) > 0 { + for name := range jsonObject { + return fmt.Errorf("unknown field %q in %v", name, md.FullName()) + } + } + return nil +} + +func isSingularWellKnownValue(fd protoreflect.FieldDescriptor) bool { + if md := fd.Message(); md != nil { + return md.FullName() == "google.protobuf.Value" && fd.Cardinality() != protoreflect.Repeated + } + return false +} + +func isSingularJSONPBUnmarshaler(v protoreflect.Value, fd protoreflect.FieldDescriptor) bool { + if fd.Message() != nil && fd.Cardinality() != protoreflect.Repeated { + _, ok := proto.MessageV1(v.Interface()).(JSONPBUnmarshaler) + return ok + } + return false +} + +func (u *Unmarshaler) unmarshalValue(v protoreflect.Value, in []byte, fd protoreflect.FieldDescriptor) (protoreflect.Value, error) { + switch { + case fd.IsList(): + var jsonArray []json.RawMessage + if err := json.Unmarshal(in, &jsonArray); err != nil { + return v, err + } + lv := v.List() + for _, raw := range jsonArray { + ve, err := u.unmarshalSingularValue(lv.NewElement(), raw, fd) + if err != nil { + return v, err + } + lv.Append(ve) + } + return v, nil + case fd.IsMap(): + var jsonObject map[string]json.RawMessage + if err := json.Unmarshal(in, &jsonObject); err != nil { + return v, err + } + kfd := fd.MapKey() + vfd := fd.MapValue() + mv := v.Map() + for key, raw := range jsonObject { + var kv protoreflect.MapKey + if kfd.Kind() == protoreflect.StringKind { + kv = protoreflect.ValueOf(key).MapKey() + } else { + v, err := u.unmarshalSingularValue(kfd.Default(), []byte(key), kfd) + if err != nil { + return v, err + } + kv = v.MapKey() + } + + vv, err := u.unmarshalSingularValue(mv.NewValue(), raw, vfd) + if err != nil { + return v, err + } + mv.Set(kv, vv) + } + return v, nil + default: + return u.unmarshalSingularValue(v, in, fd) + } +} + +var nonFinite = map[string]float64{ + `"NaN"`: math.NaN(), + `"Infinity"`: math.Inf(+1), + `"-Infinity"`: math.Inf(-1), +} + +func (u *Unmarshaler) unmarshalSingularValue(v protoreflect.Value, in []byte, fd protoreflect.FieldDescriptor) (protoreflect.Value, error) { + switch fd.Kind() { + case protoreflect.BoolKind: + return unmarshalValue(in, new(bool)) + case protoreflect.Int32Kind, protoreflect.Sint32Kind, protoreflect.Sfixed32Kind: + return unmarshalValue(trimQuote(in), new(int32)) + case protoreflect.Int64Kind, protoreflect.Sint64Kind, protoreflect.Sfixed64Kind: + return unmarshalValue(trimQuote(in), new(int64)) + case protoreflect.Uint32Kind, protoreflect.Fixed32Kind: + return unmarshalValue(trimQuote(in), new(uint32)) + case protoreflect.Uint64Kind, protoreflect.Fixed64Kind: + return unmarshalValue(trimQuote(in), new(uint64)) + case protoreflect.FloatKind: + if f, ok 
:= nonFinite[string(in)]; ok { + return protoreflect.ValueOfFloat32(float32(f)), nil + } + return unmarshalValue(trimQuote(in), new(float32)) + case protoreflect.DoubleKind: + if f, ok := nonFinite[string(in)]; ok { + return protoreflect.ValueOfFloat64(float64(f)), nil + } + return unmarshalValue(trimQuote(in), new(float64)) + case protoreflect.StringKind: + return unmarshalValue(in, new(string)) + case protoreflect.BytesKind: + return unmarshalValue(in, new([]byte)) + case protoreflect.EnumKind: + if hasPrefixAndSuffix('"', in, '"') { + vd := fd.Enum().Values().ByName(protoreflect.Name(trimQuote(in))) + if vd == nil { + return v, fmt.Errorf("unknown value %q for enum %s", in, fd.Enum().FullName()) + } + return protoreflect.ValueOfEnum(vd.Number()), nil + } + return unmarshalValue(in, new(protoreflect.EnumNumber)) + case protoreflect.MessageKind, protoreflect.GroupKind: + err := u.unmarshalMessage(v.Message(), in) + return v, err + default: + panic(fmt.Sprintf("invalid kind %v", fd.Kind())) + } +} + +func unmarshalValue(in []byte, v interface{}) (protoreflect.Value, error) { + err := json.Unmarshal(in, v) + return protoreflect.ValueOf(reflect.ValueOf(v).Elem().Interface()), err +} + +func unquoteString(in string) (out string, err error) { + err = json.Unmarshal([]byte(in), &out) + return out, err +} + +func hasPrefixAndSuffix(prefix byte, in []byte, suffix byte) bool { + if len(in) >= 2 && in[0] == prefix && in[len(in)-1] == suffix { + return true + } + return false +} + +// trimQuote is like unquoteString but simply strips surrounding quotes. +// This is incorrect, but is behavior done by the legacy implementation. +func trimQuote(in []byte) []byte { + if len(in) >= 2 && in[0] == '"' && in[len(in)-1] == '"' { + in = in[1 : len(in)-1] + } + return in +} diff --git a/vendor/github.com/golang/protobuf/jsonpb/encode.go b/vendor/github.com/golang/protobuf/jsonpb/encode.go new file mode 100644 index 000000000..685c80a62 --- /dev/null +++ b/vendor/github.com/golang/protobuf/jsonpb/encode.go @@ -0,0 +1,559 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package jsonpb + +import ( + "encoding/json" + "errors" + "fmt" + "io" + "math" + "reflect" + "sort" + "strconv" + "strings" + "time" + + "github.com/golang/protobuf/proto" + "google.golang.org/protobuf/encoding/protojson" + protoV2 "google.golang.org/protobuf/proto" + "google.golang.org/protobuf/reflect/protoreflect" + "google.golang.org/protobuf/reflect/protoregistry" +) + +const wrapJSONMarshalV2 = false + +// Marshaler is a configurable object for marshaling protocol buffer messages +// to the specified JSON representation. +type Marshaler struct { + // OrigName specifies whether to use the original protobuf name for fields. + OrigName bool + + // EnumsAsInts specifies whether to render enum values as integers, + // as opposed to string values. + EnumsAsInts bool + + // EmitDefaults specifies whether to render fields with zero values. + EmitDefaults bool + + // Indent controls whether the output is compact or not. + // If empty, the output is compact JSON. Otherwise, every JSON object + // entry and JSON array value will be on its own line. + // Each line will be preceded by repeated copies of Indent, where the + // number of copies is the current indentation depth. + Indent string + + // AnyResolver is used to resolve the google.protobuf.Any well-known type. + // If unset, the global registry is used by default. 
+ AnyResolver AnyResolver +} + +// JSONPBMarshaler is implemented by protobuf messages that customize the +// way they are marshaled to JSON. Messages that implement this should also +// implement JSONPBUnmarshaler so that the custom format can be parsed. +// +// The JSON marshaling must follow the proto to JSON specification: +// https://developers.google.com/protocol-buffers/docs/proto3#json +// +// Deprecated: Custom types should implement protobuf reflection instead. +type JSONPBMarshaler interface { + MarshalJSONPB(*Marshaler) ([]byte, error) +} + +// Marshal serializes a protobuf message as JSON into w. +func (jm *Marshaler) Marshal(w io.Writer, m proto.Message) error { + b, err := jm.marshal(m) + if len(b) > 0 { + if _, err := w.Write(b); err != nil { + return err + } + } + return err +} + +// MarshalToString serializes a protobuf message as JSON in string form. +func (jm *Marshaler) MarshalToString(m proto.Message) (string, error) { + b, err := jm.marshal(m) + if err != nil { + return "", err + } + return string(b), nil +} + +func (jm *Marshaler) marshal(m proto.Message) ([]byte, error) { + v := reflect.ValueOf(m) + if m == nil || (v.Kind() == reflect.Ptr && v.IsNil()) { + return nil, errors.New("Marshal called with nil") + } + + // Check for custom marshalers first since they may not properly + // implement protobuf reflection that the logic below relies on. + if jsm, ok := m.(JSONPBMarshaler); ok { + return jsm.MarshalJSONPB(jm) + } + + if wrapJSONMarshalV2 { + opts := protojson.MarshalOptions{ + UseProtoNames: jm.OrigName, + UseEnumNumbers: jm.EnumsAsInts, + EmitUnpopulated: jm.EmitDefaults, + Indent: jm.Indent, + } + if jm.AnyResolver != nil { + opts.Resolver = anyResolver{jm.AnyResolver} + } + return opts.Marshal(proto.MessageReflect(m).Interface()) + } else { + // Check for unpopulated required fields first. + m2 := proto.MessageReflect(m) + if err := protoV2.CheckInitialized(m2.Interface()); err != nil { + return nil, err + } + + w := jsonWriter{Marshaler: jm} + err := w.marshalMessage(m2, "", "") + return w.buf, err + } +} + +type jsonWriter struct { + *Marshaler + buf []byte +} + +func (w *jsonWriter) write(s string) { + w.buf = append(w.buf, s...) +} + +func (w *jsonWriter) marshalMessage(m protoreflect.Message, indent, typeURL string) error { + if jsm, ok := proto.MessageV1(m.Interface()).(JSONPBMarshaler); ok { + b, err := jsm.MarshalJSONPB(w.Marshaler) + if err != nil { + return err + } + if typeURL != "" { + // we are marshaling this object to an Any type + var js map[string]*json.RawMessage + if err = json.Unmarshal(b, &js); err != nil { + return fmt.Errorf("type %T produced invalid JSON: %v", m.Interface(), err) + } + turl, err := json.Marshal(typeURL) + if err != nil { + return fmt.Errorf("failed to marshal type URL %q to JSON: %v", typeURL, err) + } + js["@type"] = (*json.RawMessage)(&turl) + if b, err = json.Marshal(js); err != nil { + return err + } + } + w.write(string(b)) + return nil + } + + md := m.Descriptor() + fds := md.Fields() + + // Handle well-known types. 
+ const secondInNanos = int64(time.Second / time.Nanosecond) + switch wellKnownType(md.FullName()) { + case "Any": + return w.marshalAny(m, indent) + case "BoolValue", "BytesValue", "StringValue", + "Int32Value", "UInt32Value", "FloatValue", + "Int64Value", "UInt64Value", "DoubleValue": + fd := fds.ByNumber(1) + return w.marshalValue(fd, m.Get(fd), indent) + case "Duration": + const maxSecondsInDuration = 315576000000 + // "Generated output always contains 0, 3, 6, or 9 fractional digits, + // depending on required precision." + s := m.Get(fds.ByNumber(1)).Int() + ns := m.Get(fds.ByNumber(2)).Int() + if s < -maxSecondsInDuration || s > maxSecondsInDuration { + return fmt.Errorf("seconds out of range %v", s) + } + if ns <= -secondInNanos || ns >= secondInNanos { + return fmt.Errorf("ns out of range (%v, %v)", -secondInNanos, secondInNanos) + } + if (s > 0 && ns < 0) || (s < 0 && ns > 0) { + return errors.New("signs of seconds and nanos do not match") + } + var sign string + if s < 0 || ns < 0 { + sign, s, ns = "-", -1*s, -1*ns + } + x := fmt.Sprintf("%s%d.%09d", sign, s, ns) + x = strings.TrimSuffix(x, "000") + x = strings.TrimSuffix(x, "000") + x = strings.TrimSuffix(x, ".000") + w.write(fmt.Sprintf(`"%vs"`, x)) + return nil + case "Timestamp": + // "RFC 3339, where generated output will always be Z-normalized + // and uses 0, 3, 6 or 9 fractional digits." + s := m.Get(fds.ByNumber(1)).Int() + ns := m.Get(fds.ByNumber(2)).Int() + if ns < 0 || ns >= secondInNanos { + return fmt.Errorf("ns out of range [0, %v)", secondInNanos) + } + t := time.Unix(s, ns).UTC() + // time.RFC3339Nano isn't exactly right (we need to get 3/6/9 fractional digits). + x := t.Format("2006-01-02T15:04:05.000000000") + x = strings.TrimSuffix(x, "000") + x = strings.TrimSuffix(x, "000") + x = strings.TrimSuffix(x, ".000") + w.write(fmt.Sprintf(`"%vZ"`, x)) + return nil + case "Value": + // JSON value; which is a null, number, string, bool, object, or array. + od := md.Oneofs().Get(0) + fd := m.WhichOneof(od) + if fd == nil { + return errors.New("nil Value") + } + return w.marshalValue(fd, m.Get(fd), indent) + case "Struct", "ListValue": + // JSON object or array. + fd := fds.ByNumber(1) + return w.marshalValue(fd, m.Get(fd), indent) + } + + w.write("{") + if w.Indent != "" { + w.write("\n") + } + + firstField := true + if typeURL != "" { + if err := w.marshalTypeURL(indent, typeURL); err != nil { + return err + } + firstField = false + } + + for i := 0; i < fds.Len(); { + fd := fds.Get(i) + if od := fd.ContainingOneof(); od != nil { + fd = m.WhichOneof(od) + i += od.Fields().Len() + if fd == nil { + continue + } + } else { + i++ + } + + v := m.Get(fd) + + if !m.Has(fd) { + if !w.EmitDefaults || fd.ContainingOneof() != nil { + continue + } + if fd.Cardinality() != protoreflect.Repeated && (fd.Message() != nil || fd.Syntax() == protoreflect.Proto2) { + v = protoreflect.Value{} // use "null" for singular messages or proto2 scalars + } + } + + if !firstField { + w.writeComma() + } + if err := w.marshalField(fd, v, indent); err != nil { + return err + } + firstField = false + } + + // Handle proto2 extensions. + if md.ExtensionRanges().Len() > 0 { + // Collect a sorted list of all extension descriptor and values. 
+ type ext struct { + desc protoreflect.FieldDescriptor + val protoreflect.Value + } + var exts []ext + m.Range(func(fd protoreflect.FieldDescriptor, v protoreflect.Value) bool { + if fd.IsExtension() { + exts = append(exts, ext{fd, v}) + } + return true + }) + sort.Slice(exts, func(i, j int) bool { + return exts[i].desc.Number() < exts[j].desc.Number() + }) + + for _, ext := range exts { + if !firstField { + w.writeComma() + } + if err := w.marshalField(ext.desc, ext.val, indent); err != nil { + return err + } + firstField = false + } + } + + if w.Indent != "" { + w.write("\n") + w.write(indent) + } + w.write("}") + return nil +} + +func (w *jsonWriter) writeComma() { + if w.Indent != "" { + w.write(",\n") + } else { + w.write(",") + } +} + +func (w *jsonWriter) marshalAny(m protoreflect.Message, indent string) error { + // "If the Any contains a value that has a special JSON mapping, + // it will be converted as follows: {"@type": xxx, "value": yyy}. + // Otherwise, the value will be converted into a JSON object, + // and the "@type" field will be inserted to indicate the actual data type." + md := m.Descriptor() + typeURL := m.Get(md.Fields().ByNumber(1)).String() + rawVal := m.Get(md.Fields().ByNumber(2)).Bytes() + + var m2 protoreflect.Message + if w.AnyResolver != nil { + mi, err := w.AnyResolver.Resolve(typeURL) + if err != nil { + return err + } + m2 = proto.MessageReflect(mi) + } else { + mt, err := protoregistry.GlobalTypes.FindMessageByURL(typeURL) + if err != nil { + return err + } + m2 = mt.New() + } + + if err := protoV2.Unmarshal(rawVal, m2.Interface()); err != nil { + return err + } + + if wellKnownType(m2.Descriptor().FullName()) == "" { + return w.marshalMessage(m2, indent, typeURL) + } + + w.write("{") + if w.Indent != "" { + w.write("\n") + } + if err := w.marshalTypeURL(indent, typeURL); err != nil { + return err + } + w.writeComma() + if w.Indent != "" { + w.write(indent) + w.write(w.Indent) + w.write(`"value": `) + } else { + w.write(`"value":`) + } + if err := w.marshalMessage(m2, indent+w.Indent, ""); err != nil { + return err + } + if w.Indent != "" { + w.write("\n") + w.write(indent) + } + w.write("}") + return nil +} + +func (w *jsonWriter) marshalTypeURL(indent, typeURL string) error { + if w.Indent != "" { + w.write(indent) + w.write(w.Indent) + } + w.write(`"@type":`) + if w.Indent != "" { + w.write(" ") + } + b, err := json.Marshal(typeURL) + if err != nil { + return err + } + w.write(string(b)) + return nil +} + +// marshalField writes field description and value to the Writer. +func (w *jsonWriter) marshalField(fd protoreflect.FieldDescriptor, v protoreflect.Value, indent string) error { + if w.Indent != "" { + w.write(indent) + w.write(w.Indent) + } + w.write(`"`) + switch { + case fd.IsExtension(): + // For message set, use the fname of the message as the extension name. 
+ name := string(fd.FullName()) + if isMessageSet(fd.ContainingMessage()) { + name = strings.TrimSuffix(name, ".message_set_extension") + } + + w.write("[" + name + "]") + case w.OrigName: + name := string(fd.Name()) + if fd.Kind() == protoreflect.GroupKind { + name = string(fd.Message().Name()) + } + w.write(name) + default: + w.write(string(fd.JSONName())) + } + w.write(`":`) + if w.Indent != "" { + w.write(" ") + } + return w.marshalValue(fd, v, indent) +} + +func (w *jsonWriter) marshalValue(fd protoreflect.FieldDescriptor, v protoreflect.Value, indent string) error { + switch { + case fd.IsList(): + w.write("[") + comma := "" + lv := v.List() + for i := 0; i < lv.Len(); i++ { + w.write(comma) + if w.Indent != "" { + w.write("\n") + w.write(indent) + w.write(w.Indent) + w.write(w.Indent) + } + if err := w.marshalSingularValue(fd, lv.Get(i), indent+w.Indent); err != nil { + return err + } + comma = "," + } + if w.Indent != "" { + w.write("\n") + w.write(indent) + w.write(w.Indent) + } + w.write("]") + return nil + case fd.IsMap(): + kfd := fd.MapKey() + vfd := fd.MapValue() + mv := v.Map() + + // Collect a sorted list of all map keys and values. + type entry struct{ key, val protoreflect.Value } + var entries []entry + mv.Range(func(k protoreflect.MapKey, v protoreflect.Value) bool { + entries = append(entries, entry{k.Value(), v}) + return true + }) + sort.Slice(entries, func(i, j int) bool { + switch kfd.Kind() { + case protoreflect.BoolKind: + return !entries[i].key.Bool() && entries[j].key.Bool() + case protoreflect.Int32Kind, protoreflect.Sint32Kind, protoreflect.Sfixed32Kind, protoreflect.Int64Kind, protoreflect.Sint64Kind, protoreflect.Sfixed64Kind: + return entries[i].key.Int() < entries[j].key.Int() + case protoreflect.Uint32Kind, protoreflect.Fixed32Kind, protoreflect.Uint64Kind, protoreflect.Fixed64Kind: + return entries[i].key.Uint() < entries[j].key.Uint() + case protoreflect.StringKind: + return entries[i].key.String() < entries[j].key.String() + default: + panic("invalid kind") + } + }) + + w.write(`{`) + comma := "" + for _, entry := range entries { + w.write(comma) + if w.Indent != "" { + w.write("\n") + w.write(indent) + w.write(w.Indent) + w.write(w.Indent) + } + + s := fmt.Sprint(entry.key.Interface()) + b, err := json.Marshal(s) + if err != nil { + return err + } + w.write(string(b)) + + w.write(`:`) + if w.Indent != "" { + w.write(` `) + } + + if err := w.marshalSingularValue(vfd, entry.val, indent+w.Indent); err != nil { + return err + } + comma = "," + } + if w.Indent != "" { + w.write("\n") + w.write(indent) + w.write(w.Indent) + } + w.write(`}`) + return nil + default: + return w.marshalSingularValue(fd, v, indent) + } +} + +func (w *jsonWriter) marshalSingularValue(fd protoreflect.FieldDescriptor, v protoreflect.Value, indent string) error { + switch { + case !v.IsValid(): + w.write("null") + return nil + case fd.Message() != nil: + return w.marshalMessage(v.Message(), indent+w.Indent, "") + case fd.Enum() != nil: + if fd.Enum().FullName() == "google.protobuf.NullValue" { + w.write("null") + return nil + } + + vd := fd.Enum().Values().ByNumber(v.Enum()) + if vd == nil || w.EnumsAsInts { + w.write(strconv.Itoa(int(v.Enum()))) + } else { + w.write(`"` + string(vd.Name()) + `"`) + } + return nil + default: + switch v.Interface().(type) { + case float32, float64: + switch { + case math.IsInf(v.Float(), +1): + w.write(`"Infinity"`) + return nil + case math.IsInf(v.Float(), -1): + w.write(`"-Infinity"`) + return nil + case math.IsNaN(v.Float()): + w.write(`"NaN"`) + 
return nil + } + case int64, uint64: + w.write(fmt.Sprintf(`"%d"`, v.Interface())) + return nil + } + + b, err := json.Marshal(v.Interface()) + if err != nil { + return err + } + w.write(string(b)) + return nil + } +} diff --git a/vendor/github.com/golang/protobuf/jsonpb/json.go b/vendor/github.com/golang/protobuf/jsonpb/json.go new file mode 100644 index 000000000..480e2448d --- /dev/null +++ b/vendor/github.com/golang/protobuf/jsonpb/json.go @@ -0,0 +1,69 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package jsonpb provides functionality to marshal and unmarshal between a +// protocol buffer message and JSON. It follows the specification at +// https://developers.google.com/protocol-buffers/docs/proto3#json. +// +// Do not rely on the default behavior of the standard encoding/json package +// when called on generated message types as it does not operate correctly. +// +// Deprecated: Use the "google.golang.org/protobuf/encoding/protojson" +// package instead. +package jsonpb + +import ( + "github.com/golang/protobuf/proto" + "google.golang.org/protobuf/reflect/protoreflect" + "google.golang.org/protobuf/reflect/protoregistry" + "google.golang.org/protobuf/runtime/protoimpl" +) + +// AnyResolver takes a type URL, present in an Any message, +// and resolves it into an instance of the associated message. +type AnyResolver interface { + Resolve(typeURL string) (proto.Message, error) +} + +type anyResolver struct{ AnyResolver } + +func (r anyResolver) FindMessageByName(message protoreflect.FullName) (protoreflect.MessageType, error) { + return r.FindMessageByURL(string(message)) +} + +func (r anyResolver) FindMessageByURL(url string) (protoreflect.MessageType, error) { + m, err := r.Resolve(url) + if err != nil { + return nil, err + } + return protoimpl.X.MessageTypeOf(m), nil +} + +func (r anyResolver) FindExtensionByName(field protoreflect.FullName) (protoreflect.ExtensionType, error) { + return protoregistry.GlobalTypes.FindExtensionByName(field) +} + +func (r anyResolver) FindExtensionByNumber(message protoreflect.FullName, field protoreflect.FieldNumber) (protoreflect.ExtensionType, error) { + return protoregistry.GlobalTypes.FindExtensionByNumber(message, field) +} + +func wellKnownType(s protoreflect.FullName) string { + if s.Parent() == "google.protobuf" { + switch s.Name() { + case "Empty", "Any", + "BoolValue", "BytesValue", "StringValue", + "Int32Value", "UInt32Value", "FloatValue", + "Int64Value", "UInt64Value", "DoubleValue", + "Duration", "Timestamp", + "NullValue", "Struct", "Value", "ListValue": + return string(s.Name()) + } + } + return "" +} + +func isMessageSet(md protoreflect.MessageDescriptor) bool { + ms, ok := md.(interface{ IsMessageSet() bool }) + return ok && ms.IsMessageSet() +} diff --git a/vendor/github.com/google/go-cmp/cmp/compare.go b/vendor/github.com/google/go-cmp/cmp/compare.go index 86d0903b8..fd2b3a42b 100644 --- a/vendor/github.com/google/go-cmp/cmp/compare.go +++ b/vendor/github.com/google/go-cmp/cmp/compare.go @@ -36,11 +36,12 @@ import ( "strings" "github.com/google/go-cmp/cmp/internal/diff" - "github.com/google/go-cmp/cmp/internal/flags" "github.com/google/go-cmp/cmp/internal/function" "github.com/google/go-cmp/cmp/internal/value" ) +// TODO(≥go1.18): Use any instead of interface{}. 
+ // Equal reports whether x and y are equal by recursively applying the // following rules in the given order to x and y and all of their sub-values: // @@ -319,7 +320,6 @@ func (s *state) tryMethod(t reflect.Type, vx, vy reflect.Value) bool { } func (s *state) callTRFunc(f, v reflect.Value, step Transform) reflect.Value { - v = sanitizeValue(v, f.Type().In(0)) if !s.dynChecker.Next() { return f.Call([]reflect.Value{v})[0] } @@ -343,8 +343,6 @@ func (s *state) callTRFunc(f, v reflect.Value, step Transform) reflect.Value { } func (s *state) callTTBFunc(f, x, y reflect.Value) bool { - x = sanitizeValue(x, f.Type().In(0)) - y = sanitizeValue(y, f.Type().In(1)) if !s.dynChecker.Next() { return f.Call([]reflect.Value{x, y})[0].Bool() } @@ -372,19 +370,6 @@ func detectRaces(c chan<- reflect.Value, f reflect.Value, vs ...reflect.Value) { ret = f.Call(vs)[0] } -// sanitizeValue converts nil interfaces of type T to those of type R, -// assuming that T is assignable to R. -// Otherwise, it returns the input value as is. -func sanitizeValue(v reflect.Value, t reflect.Type) reflect.Value { - // TODO(≥go1.10): Workaround for reflect bug (https://golang.org/issue/22143). - if !flags.AtLeastGo110 { - if v.Kind() == reflect.Interface && v.IsNil() && v.Type() != t { - return reflect.New(t).Elem() - } - } - return v -} - func (s *state) compareStruct(t reflect.Type, vx, vy reflect.Value) { var addr bool var vax, vay reflect.Value // Addressable versions of vx and vy diff --git a/vendor/github.com/google/go-cmp/cmp/export_panic.go b/vendor/github.com/google/go-cmp/cmp/export_panic.go index 5ff0b4218..ae851fe53 100644 --- a/vendor/github.com/google/go-cmp/cmp/export_panic.go +++ b/vendor/github.com/google/go-cmp/cmp/export_panic.go @@ -2,6 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. +//go:build purego // +build purego package cmp diff --git a/vendor/github.com/google/go-cmp/cmp/export_unsafe.go b/vendor/github.com/google/go-cmp/cmp/export_unsafe.go index 21eb54858..e2c0f74e8 100644 --- a/vendor/github.com/google/go-cmp/cmp/export_unsafe.go +++ b/vendor/github.com/google/go-cmp/cmp/export_unsafe.go @@ -2,6 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. +//go:build !purego // +build !purego package cmp diff --git a/vendor/github.com/google/go-cmp/cmp/internal/diff/debug_disable.go b/vendor/github.com/google/go-cmp/cmp/internal/diff/debug_disable.go index 1daaaacc5..36062a604 100644 --- a/vendor/github.com/google/go-cmp/cmp/internal/diff/debug_disable.go +++ b/vendor/github.com/google/go-cmp/cmp/internal/diff/debug_disable.go @@ -2,6 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. +//go:build !cmp_debug // +build !cmp_debug package diff diff --git a/vendor/github.com/google/go-cmp/cmp/internal/diff/debug_enable.go b/vendor/github.com/google/go-cmp/cmp/internal/diff/debug_enable.go index 4b91dbcac..a3b97a1ad 100644 --- a/vendor/github.com/google/go-cmp/cmp/internal/diff/debug_enable.go +++ b/vendor/github.com/google/go-cmp/cmp/internal/diff/debug_enable.go @@ -2,6 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. 
+//go:build cmp_debug // +build cmp_debug package diff diff --git a/vendor/github.com/google/go-cmp/cmp/internal/flags/toolchain_legacy.go b/vendor/github.com/google/go-cmp/cmp/internal/flags/toolchain_legacy.go deleted file mode 100644 index 82d1d7fbf..000000000 --- a/vendor/github.com/google/go-cmp/cmp/internal/flags/toolchain_legacy.go +++ /dev/null @@ -1,10 +0,0 @@ -// Copyright 2019, The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build !go1.10 - -package flags - -// AtLeastGo110 reports whether the Go toolchain is at least Go 1.10. -const AtLeastGo110 = false diff --git a/vendor/github.com/google/go-cmp/cmp/internal/flags/toolchain_recent.go b/vendor/github.com/google/go-cmp/cmp/internal/flags/toolchain_recent.go deleted file mode 100644 index 8646f0529..000000000 --- a/vendor/github.com/google/go-cmp/cmp/internal/flags/toolchain_recent.go +++ /dev/null @@ -1,10 +0,0 @@ -// Copyright 2019, The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build go1.10 - -package flags - -// AtLeastGo110 reports whether the Go toolchain is at least Go 1.10. -const AtLeastGo110 = true diff --git a/vendor/github.com/google/go-cmp/cmp/internal/value/name.go b/vendor/github.com/google/go-cmp/cmp/internal/value/name.go index b6c12cefb..7b498bb2c 100644 --- a/vendor/github.com/google/go-cmp/cmp/internal/value/name.go +++ b/vendor/github.com/google/go-cmp/cmp/internal/value/name.go @@ -9,6 +9,8 @@ import ( "strconv" ) +var anyType = reflect.TypeOf((*interface{})(nil)).Elem() + // TypeString is nearly identical to reflect.Type.String, // but has an additional option to specify that full type names be used. func TypeString(t reflect.Type, qualified bool) string { @@ -20,6 +22,11 @@ func appendTypeName(b []byte, t reflect.Type, qualified, elideFunc bool) []byte // of the same name and within the same package, // but declared within the namespace of different functions. + // Use the "any" alias instead of "interface{}" for better readability. + if t == anyType { + return append(b, "any"...) + } + // Named type. if t.Name() != "" { if qualified && t.PkgPath() != "" { diff --git a/vendor/github.com/google/go-cmp/cmp/internal/value/pointer_purego.go b/vendor/github.com/google/go-cmp/cmp/internal/value/pointer_purego.go index 44f4a5afd..1a71bfcbd 100644 --- a/vendor/github.com/google/go-cmp/cmp/internal/value/pointer_purego.go +++ b/vendor/github.com/google/go-cmp/cmp/internal/value/pointer_purego.go @@ -2,6 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. +//go:build purego // +build purego package value diff --git a/vendor/github.com/google/go-cmp/cmp/internal/value/pointer_unsafe.go b/vendor/github.com/google/go-cmp/cmp/internal/value/pointer_unsafe.go index a605953d4..16e6860af 100644 --- a/vendor/github.com/google/go-cmp/cmp/internal/value/pointer_unsafe.go +++ b/vendor/github.com/google/go-cmp/cmp/internal/value/pointer_unsafe.go @@ -2,6 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. 
+//go:build !purego // +build !purego package value diff --git a/vendor/github.com/google/go-cmp/cmp/path.go b/vendor/github.com/google/go-cmp/cmp/path.go index f01eff318..c71003463 100644 --- a/vendor/github.com/google/go-cmp/cmp/path.go +++ b/vendor/github.com/google/go-cmp/cmp/path.go @@ -178,7 +178,7 @@ type structField struct { unexported bool mayForce bool // Forcibly allow visibility paddr bool // Was parent addressable? - pvx, pvy reflect.Value // Parent values (always addressible) + pvx, pvy reflect.Value // Parent values (always addressable) field reflect.StructField // Field information } diff --git a/vendor/github.com/google/go-cmp/cmp/report_compare.go b/vendor/github.com/google/go-cmp/cmp/report_compare.go index 104bb3053..1ef65ac1d 100644 --- a/vendor/github.com/google/go-cmp/cmp/report_compare.go +++ b/vendor/github.com/google/go-cmp/cmp/report_compare.go @@ -116,7 +116,10 @@ func (opts formatOptions) FormatDiff(v *valueNode, ptrs *pointerReferences) (out } // For leaf nodes, format the value based on the reflect.Values alone. - if v.MaxDepth == 0 { + // As a special case, treat equal []byte as a leaf nodes. + isBytes := v.Type.Kind() == reflect.Slice && v.Type.Elem() == reflect.TypeOf(byte(0)) + isEqualBytes := isBytes && v.NumDiff+v.NumIgnored+v.NumTransformed == 0 + if v.MaxDepth == 0 || isEqualBytes { switch opts.DiffMode { case diffUnknown, diffIdentical: // Format Equal. diff --git a/vendor/github.com/google/go-cmp/cmp/report_reflect.go b/vendor/github.com/google/go-cmp/cmp/report_reflect.go index 33f03577f..287b89358 100644 --- a/vendor/github.com/google/go-cmp/cmp/report_reflect.go +++ b/vendor/github.com/google/go-cmp/cmp/report_reflect.go @@ -207,10 +207,11 @@ func (opts formatOptions) FormatValue(v reflect.Value, parentKind reflect.Kind, // Check whether this is a []byte of text data. if t.Elem() == reflect.TypeOf(byte(0)) { b := v.Bytes() - isPrintSpace := func(r rune) bool { return unicode.IsPrint(r) && unicode.IsSpace(r) } + isPrintSpace := func(r rune) bool { return unicode.IsPrint(r) || unicode.IsSpace(r) } if len(b) > 0 && utf8.Valid(b) && len(bytes.TrimFunc(b, isPrintSpace)) == 0 { out = opts.formatString("", string(b)) - return opts.WithTypeMode(emitType).FormatType(t, out) + skipType = true + return opts.FormatType(t, out) } } @@ -281,7 +282,12 @@ func (opts formatOptions) FormatValue(v reflect.Value, parentKind reflect.Kind, } defer ptrs.Pop() - skipType = true // Let the underlying value print the type instead + // Skip the name only if this is an unnamed pointer type. + // Otherwise taking the address of a value does not reproduce + // the named pointer type. + if v.Type().Name() == "" { + skipType = true // Let the underlying value print the type instead + } out = opts.FormatValue(v.Elem(), t.Kind(), ptrs) out = wrapTrunkReference(ptrRef, opts.PrintAddresses, out) out = &textWrap{Prefix: "&", Value: out} @@ -292,7 +298,6 @@ func (opts formatOptions) FormatValue(v reflect.Value, parentKind reflect.Kind, } // Interfaces accept different concrete types, // so configure the underlying value to explicitly print the type. 
- skipType = true // Print the concrete type instead return opts.WithTypeMode(emitType).FormatValue(v.Elem(), t.Kind(), ptrs) default: panic(fmt.Sprintf("%v kind not handled", v.Kind())) diff --git a/vendor/github.com/google/go-cmp/cmp/report_slices.go b/vendor/github.com/google/go-cmp/cmp/report_slices.go index 2ad3bc85b..68b5c1ae1 100644 --- a/vendor/github.com/google/go-cmp/cmp/report_slices.go +++ b/vendor/github.com/google/go-cmp/cmp/report_slices.go @@ -80,7 +80,7 @@ func (opts formatOptions) CanFormatDiffSlice(v *valueNode) bool { } // Use specialized string diffing for longer slices or strings. - const minLength = 64 + const minLength = 32 return vx.Len() >= minLength && vy.Len() >= minLength } @@ -563,10 +563,10 @@ func cleanupSurroundingIdentical(groups []diffStats, eq func(i, j int) bool) []d nx := ds.NumIdentical + ds.NumRemoved + ds.NumModified ny := ds.NumIdentical + ds.NumInserted + ds.NumModified var numLeadingIdentical, numTrailingIdentical int - for i := 0; i < nx && i < ny && eq(ix+i, iy+i); i++ { + for j := 0; j < nx && j < ny && eq(ix+j, iy+j); j++ { numLeadingIdentical++ } - for i := 0; i < nx && i < ny && eq(ix+nx-1-i, iy+ny-1-i); i++ { + for j := 0; j < nx && j < ny && eq(ix+nx-1-j, iy+ny-1-j); j++ { numTrailingIdentical++ } if numIdentical := numLeadingIdentical + numTrailingIdentical; numIdentical > 0 { diff --git a/vendor/github.com/google/uuid/.travis.yml b/vendor/github.com/google/uuid/.travis.yml new file mode 100644 index 000000000..d8156a60b --- /dev/null +++ b/vendor/github.com/google/uuid/.travis.yml @@ -0,0 +1,9 @@ +language: go + +go: + - 1.4.3 + - 1.5.3 + - tip + +script: + - go test -v ./... diff --git a/vendor/github.com/google/uuid/CONTRIBUTING.md b/vendor/github.com/google/uuid/CONTRIBUTING.md new file mode 100644 index 000000000..04fdf09f1 --- /dev/null +++ b/vendor/github.com/google/uuid/CONTRIBUTING.md @@ -0,0 +1,10 @@ +# How to contribute + +We definitely welcome patches and contribution to this project! + +### Legal requirements + +In order to protect both you and ourselves, you will need to sign the +[Contributor License Agreement](https://cla.developers.google.com/clas). + +You may have already signed it for other Google projects. diff --git a/vendor/github.com/google/uuid/CONTRIBUTORS b/vendor/github.com/google/uuid/CONTRIBUTORS new file mode 100644 index 000000000..b4bb97f6b --- /dev/null +++ b/vendor/github.com/google/uuid/CONTRIBUTORS @@ -0,0 +1,9 @@ +Paul Borman +bmatsuo +shawnps +theory +jboverfelt +dsymonds +cd1 +wallclockbuilder +dansouza diff --git a/vendor/github.com/google/uuid/LICENSE b/vendor/github.com/google/uuid/LICENSE new file mode 100644 index 000000000..5dc68268d --- /dev/null +++ b/vendor/github.com/google/uuid/LICENSE @@ -0,0 +1,27 @@ +Copyright (c) 2009,2014 Google Inc. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. 
+ +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/github.com/google/uuid/README.md b/vendor/github.com/google/uuid/README.md new file mode 100644 index 000000000..f765a46f9 --- /dev/null +++ b/vendor/github.com/google/uuid/README.md @@ -0,0 +1,19 @@ +# uuid ![build status](https://travis-ci.org/google/uuid.svg?branch=master) +The uuid package generates and inspects UUIDs based on +[RFC 4122](http://tools.ietf.org/html/rfc4122) +and DCE 1.1: Authentication and Security Services. + +This package is based on the github.com/pborman/uuid package (previously named +code.google.com/p/go-uuid). It differs from these earlier packages in that +a UUID is a 16 byte array rather than a byte slice. One loss due to this +change is the ability to represent an invalid UUID (vs a NIL UUID). + +###### Install +`go get github.com/google/uuid` + +###### Documentation +[![GoDoc](https://godoc.org/github.com/google/uuid?status.svg)](http://godoc.org/github.com/google/uuid) + +Full `go doc` style documentation for the package can be viewed online without +installing this package by using the GoDoc site here: +http://pkg.go.dev/github.com/google/uuid diff --git a/vendor/github.com/google/uuid/dce.go b/vendor/github.com/google/uuid/dce.go new file mode 100644 index 000000000..fa820b9d3 --- /dev/null +++ b/vendor/github.com/google/uuid/dce.go @@ -0,0 +1,80 @@ +// Copyright 2016 Google Inc. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package uuid + +import ( + "encoding/binary" + "fmt" + "os" +) + +// A Domain represents a Version 2 domain +type Domain byte + +// Domain constants for DCE Security (Version 2) UUIDs. +const ( + Person = Domain(0) + Group = Domain(1) + Org = Domain(2) +) + +// NewDCESecurity returns a DCE Security (Version 2) UUID. +// +// The domain should be one of Person, Group or Org. +// On a POSIX system the id should be the users UID for the Person +// domain and the users GID for the Group. The meaning of id for +// the domain Org or on non-POSIX systems is site defined. +// +// For a given domain/id pair the same token may be returned for up to +// 7 minutes and 10 seconds. +func NewDCESecurity(domain Domain, id uint32) (UUID, error) { + uuid, err := NewUUID() + if err == nil { + uuid[6] = (uuid[6] & 0x0f) | 0x20 // Version 2 + uuid[9] = byte(domain) + binary.BigEndian.PutUint32(uuid[0:], id) + } + return uuid, err +} + +// NewDCEPerson returns a DCE Security (Version 2) UUID in the person +// domain with the id returned by os.Getuid. 
+// +// NewDCESecurity(Person, uint32(os.Getuid())) +func NewDCEPerson() (UUID, error) { + return NewDCESecurity(Person, uint32(os.Getuid())) +} + +// NewDCEGroup returns a DCE Security (Version 2) UUID in the group +// domain with the id returned by os.Getgid. +// +// NewDCESecurity(Group, uint32(os.Getgid())) +func NewDCEGroup() (UUID, error) { + return NewDCESecurity(Group, uint32(os.Getgid())) +} + +// Domain returns the domain for a Version 2 UUID. Domains are only defined +// for Version 2 UUIDs. +func (uuid UUID) Domain() Domain { + return Domain(uuid[9]) +} + +// ID returns the id for a Version 2 UUID. IDs are only defined for Version 2 +// UUIDs. +func (uuid UUID) ID() uint32 { + return binary.BigEndian.Uint32(uuid[0:4]) +} + +func (d Domain) String() string { + switch d { + case Person: + return "Person" + case Group: + return "Group" + case Org: + return "Org" + } + return fmt.Sprintf("Domain%d", int(d)) +} diff --git a/vendor/github.com/google/uuid/doc.go b/vendor/github.com/google/uuid/doc.go new file mode 100644 index 000000000..5b8a4b9af --- /dev/null +++ b/vendor/github.com/google/uuid/doc.go @@ -0,0 +1,12 @@ +// Copyright 2016 Google Inc. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package uuid generates and inspects UUIDs. +// +// UUIDs are based on RFC 4122 and DCE 1.1: Authentication and Security +// Services. +// +// A UUID is a 16 byte (128 bit) array. UUIDs may be used as keys to +// maps or compared directly. +package uuid diff --git a/vendor/github.com/google/uuid/hash.go b/vendor/github.com/google/uuid/hash.go new file mode 100644 index 000000000..b404f4bec --- /dev/null +++ b/vendor/github.com/google/uuid/hash.go @@ -0,0 +1,53 @@ +// Copyright 2016 Google Inc. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package uuid + +import ( + "crypto/md5" + "crypto/sha1" + "hash" +) + +// Well known namespace IDs and UUIDs +var ( + NameSpaceDNS = Must(Parse("6ba7b810-9dad-11d1-80b4-00c04fd430c8")) + NameSpaceURL = Must(Parse("6ba7b811-9dad-11d1-80b4-00c04fd430c8")) + NameSpaceOID = Must(Parse("6ba7b812-9dad-11d1-80b4-00c04fd430c8")) + NameSpaceX500 = Must(Parse("6ba7b814-9dad-11d1-80b4-00c04fd430c8")) + Nil UUID // empty UUID, all zeros +) + +// NewHash returns a new UUID derived from the hash of space concatenated with +// data generated by h. The hash should be at least 16 byte in length. The +// first 16 bytes of the hash are used to form the UUID. The version of the +// UUID will be the lower 4 bits of version. NewHash is used to implement +// NewMD5 and NewSHA1. +func NewHash(h hash.Hash, space UUID, data []byte, version int) UUID { + h.Reset() + h.Write(space[:]) //nolint:errcheck + h.Write(data) //nolint:errcheck + s := h.Sum(nil) + var uuid UUID + copy(uuid[:], s) + uuid[6] = (uuid[6] & 0x0f) | uint8((version&0xf)<<4) + uuid[8] = (uuid[8] & 0x3f) | 0x80 // RFC 4122 variant + return uuid +} + +// NewMD5 returns a new MD5 (Version 3) UUID based on the +// supplied name space and data. It is the same as calling: +// +// NewHash(md5.New(), space, data, 3) +func NewMD5(space UUID, data []byte) UUID { + return NewHash(md5.New(), space, data, 3) +} + +// NewSHA1 returns a new SHA1 (Version 5) UUID based on the +// supplied name space and data. 
It is the same as calling: +// +// NewHash(sha1.New(), space, data, 5) +func NewSHA1(space UUID, data []byte) UUID { + return NewHash(sha1.New(), space, data, 5) +} diff --git a/vendor/github.com/google/uuid/marshal.go b/vendor/github.com/google/uuid/marshal.go new file mode 100644 index 000000000..14bd34072 --- /dev/null +++ b/vendor/github.com/google/uuid/marshal.go @@ -0,0 +1,38 @@ +// Copyright 2016 Google Inc. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package uuid + +import "fmt" + +// MarshalText implements encoding.TextMarshaler. +func (uuid UUID) MarshalText() ([]byte, error) { + var js [36]byte + encodeHex(js[:], uuid) + return js[:], nil +} + +// UnmarshalText implements encoding.TextUnmarshaler. +func (uuid *UUID) UnmarshalText(data []byte) error { + id, err := ParseBytes(data) + if err != nil { + return err + } + *uuid = id + return nil +} + +// MarshalBinary implements encoding.BinaryMarshaler. +func (uuid UUID) MarshalBinary() ([]byte, error) { + return uuid[:], nil +} + +// UnmarshalBinary implements encoding.BinaryUnmarshaler. +func (uuid *UUID) UnmarshalBinary(data []byte) error { + if len(data) != 16 { + return fmt.Errorf("invalid UUID (got %d bytes)", len(data)) + } + copy(uuid[:], data) + return nil +} diff --git a/vendor/github.com/google/uuid/node.go b/vendor/github.com/google/uuid/node.go new file mode 100644 index 000000000..d651a2b06 --- /dev/null +++ b/vendor/github.com/google/uuid/node.go @@ -0,0 +1,90 @@ +// Copyright 2016 Google Inc. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package uuid + +import ( + "sync" +) + +var ( + nodeMu sync.Mutex + ifname string // name of interface being used + nodeID [6]byte // hardware for version 1 UUIDs + zeroID [6]byte // nodeID with only 0's +) + +// NodeInterface returns the name of the interface from which the NodeID was +// derived. The interface "user" is returned if the NodeID was set by +// SetNodeID. +func NodeInterface() string { + defer nodeMu.Unlock() + nodeMu.Lock() + return ifname +} + +// SetNodeInterface selects the hardware address to be used for Version 1 UUIDs. +// If name is "" then the first usable interface found will be used or a random +// Node ID will be generated. If a named interface cannot be found then false +// is returned. +// +// SetNodeInterface never fails when name is "". +func SetNodeInterface(name string) bool { + defer nodeMu.Unlock() + nodeMu.Lock() + return setNodeInterface(name) +} + +func setNodeInterface(name string) bool { + iname, addr := getHardwareInterface(name) // null implementation for js + if iname != "" && addr != nil { + ifname = iname + copy(nodeID[:], addr) + return true + } + + // We found no interfaces with a valid hardware address. If name + // does not specify a specific interface generate a random Node ID + // (section 4.1.6) + if name == "" { + ifname = "random" + randomBits(nodeID[:]) + return true + } + return false +} + +// NodeID returns a slice of a copy of the current Node ID, setting the Node ID +// if not already set. +func NodeID() []byte { + defer nodeMu.Unlock() + nodeMu.Lock() + if nodeID == zeroID { + setNodeInterface("") + } + nid := nodeID + return nid[:] +} + +// SetNodeID sets the Node ID to be used for Version 1 UUIDs. The first 6 bytes +// of id are used. If id is less than 6 bytes then false is returned and the +// Node ID is not set. 
+func SetNodeID(id []byte) bool { + if len(id) < 6 { + return false + } + defer nodeMu.Unlock() + nodeMu.Lock() + copy(nodeID[:], id) + ifname = "user" + return true +} + +// NodeID returns the 6 byte node id encoded in uuid. It returns nil if uuid is +// not valid. The NodeID is only well defined for version 1 and 2 UUIDs. +func (uuid UUID) NodeID() []byte { + var node [6]byte + copy(node[:], uuid[10:]) + return node[:] +} diff --git a/vendor/github.com/google/uuid/node_js.go b/vendor/github.com/google/uuid/node_js.go new file mode 100644 index 000000000..24b78edc9 --- /dev/null +++ b/vendor/github.com/google/uuid/node_js.go @@ -0,0 +1,12 @@ +// Copyright 2017 Google Inc. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build js + +package uuid + +// getHardwareInterface returns nil values for the JS version of the code. +// This remvoves the "net" dependency, because it is not used in the browser. +// Using the "net" library inflates the size of the transpiled JS code by 673k bytes. +func getHardwareInterface(name string) (string, []byte) { return "", nil } diff --git a/vendor/github.com/google/uuid/node_net.go b/vendor/github.com/google/uuid/node_net.go new file mode 100644 index 000000000..0cbbcddbd --- /dev/null +++ b/vendor/github.com/google/uuid/node_net.go @@ -0,0 +1,33 @@ +// Copyright 2017 Google Inc. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build !js + +package uuid + +import "net" + +var interfaces []net.Interface // cached list of interfaces + +// getHardwareInterface returns the name and hardware address of interface name. +// If name is "" then the name and hardware address of one of the system's +// interfaces is returned. If no interfaces are found (name does not exist or +// there are no interfaces) then "", nil is returned. +// +// Only addresses of at least 6 bytes are returned. +func getHardwareInterface(name string) (string, []byte) { + if interfaces == nil { + var err error + interfaces, err = net.Interfaces() + if err != nil { + return "", nil + } + } + for _, ifs := range interfaces { + if len(ifs.HardwareAddr) >= 6 && (name == "" || name == ifs.Name) { + return ifs.Name, ifs.HardwareAddr + } + } + return "", nil +} diff --git a/vendor/github.com/google/uuid/null.go b/vendor/github.com/google/uuid/null.go new file mode 100644 index 000000000..d7fcbf286 --- /dev/null +++ b/vendor/github.com/google/uuid/null.go @@ -0,0 +1,118 @@ +// Copyright 2021 Google Inc. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package uuid + +import ( + "bytes" + "database/sql/driver" + "encoding/json" + "fmt" +) + +var jsonNull = []byte("null") + +// NullUUID represents a UUID that may be null. +// NullUUID implements the SQL driver.Scanner interface so +// it can be used as a scan destination: +// +// var u uuid.NullUUID +// err := db.QueryRow("SELECT name FROM foo WHERE id=?", id).Scan(&u) +// ... +// if u.Valid { +// // use u.UUID +// } else { +// // NULL value +// } +// +type NullUUID struct { + UUID UUID + Valid bool // Valid is true if UUID is not NULL +} + +// Scan implements the SQL driver.Scanner interface. 
+func (nu *NullUUID) Scan(value interface{}) error { + if value == nil { + nu.UUID, nu.Valid = Nil, false + return nil + } + + err := nu.UUID.Scan(value) + if err != nil { + nu.Valid = false + return err + } + + nu.Valid = true + return nil +} + +// Value implements the driver Valuer interface. +func (nu NullUUID) Value() (driver.Value, error) { + if !nu.Valid { + return nil, nil + } + // Delegate to UUID Value function + return nu.UUID.Value() +} + +// MarshalBinary implements encoding.BinaryMarshaler. +func (nu NullUUID) MarshalBinary() ([]byte, error) { + if nu.Valid { + return nu.UUID[:], nil + } + + return []byte(nil), nil +} + +// UnmarshalBinary implements encoding.BinaryUnmarshaler. +func (nu *NullUUID) UnmarshalBinary(data []byte) error { + if len(data) != 16 { + return fmt.Errorf("invalid UUID (got %d bytes)", len(data)) + } + copy(nu.UUID[:], data) + nu.Valid = true + return nil +} + +// MarshalText implements encoding.TextMarshaler. +func (nu NullUUID) MarshalText() ([]byte, error) { + if nu.Valid { + return nu.UUID.MarshalText() + } + + return jsonNull, nil +} + +// UnmarshalText implements encoding.TextUnmarshaler. +func (nu *NullUUID) UnmarshalText(data []byte) error { + id, err := ParseBytes(data) + if err != nil { + nu.Valid = false + return err + } + nu.UUID = id + nu.Valid = true + return nil +} + +// MarshalJSON implements json.Marshaler. +func (nu NullUUID) MarshalJSON() ([]byte, error) { + if nu.Valid { + return json.Marshal(nu.UUID) + } + + return jsonNull, nil +} + +// UnmarshalJSON implements json.Unmarshaler. +func (nu *NullUUID) UnmarshalJSON(data []byte) error { + if bytes.Equal(data, jsonNull) { + *nu = NullUUID{} + return nil // valid null UUID + } + err := json.Unmarshal(data, &nu.UUID) + nu.Valid = err == nil + return err +} diff --git a/vendor/github.com/google/uuid/sql.go b/vendor/github.com/google/uuid/sql.go new file mode 100644 index 000000000..2e02ec06c --- /dev/null +++ b/vendor/github.com/google/uuid/sql.go @@ -0,0 +1,59 @@ +// Copyright 2016 Google Inc. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package uuid + +import ( + "database/sql/driver" + "fmt" +) + +// Scan implements sql.Scanner so UUIDs can be read from databases transparently. +// Currently, database types that map to string and []byte are supported. Please +// consult database-specific driver documentation for matching types. +func (uuid *UUID) Scan(src interface{}) error { + switch src := src.(type) { + case nil: + return nil + + case string: + // if an empty UUID comes from a table, we return a null UUID + if src == "" { + return nil + } + + // see Parse for required string format + u, err := Parse(src) + if err != nil { + return fmt.Errorf("Scan: %v", err) + } + + *uuid = u + + case []byte: + // if an empty UUID comes from a table, we return a null UUID + if len(src) == 0 { + return nil + } + + // assumes a simple slice of bytes if 16 bytes + // otherwise attempts to parse + if len(src) != 16 { + return uuid.Scan(string(src)) + } + copy((*uuid)[:], src) + + default: + return fmt.Errorf("Scan: unable to scan type %T into UUID", src) + } + + return nil +} + +// Value implements sql.Valuer so that UUIDs can be written to databases +// transparently. Currently, UUIDs map to strings. Please consult +// database-specific driver documentation for matching types. 
+func (uuid UUID) Value() (driver.Value, error) { + return uuid.String(), nil +} diff --git a/vendor/github.com/google/uuid/time.go b/vendor/github.com/google/uuid/time.go new file mode 100644 index 000000000..e6ef06cdc --- /dev/null +++ b/vendor/github.com/google/uuid/time.go @@ -0,0 +1,123 @@ +// Copyright 2016 Google Inc. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package uuid + +import ( + "encoding/binary" + "sync" + "time" +) + +// A Time represents a time as the number of 100's of nanoseconds since 15 Oct +// 1582. +type Time int64 + +const ( + lillian = 2299160 // Julian day of 15 Oct 1582 + unix = 2440587 // Julian day of 1 Jan 1970 + epoch = unix - lillian // Days between epochs + g1582 = epoch * 86400 // seconds between epochs + g1582ns100 = g1582 * 10000000 // 100s of a nanoseconds between epochs +) + +var ( + timeMu sync.Mutex + lasttime uint64 // last time we returned + clockSeq uint16 // clock sequence for this run + + timeNow = time.Now // for testing +) + +// UnixTime converts t the number of seconds and nanoseconds using the Unix +// epoch of 1 Jan 1970. +func (t Time) UnixTime() (sec, nsec int64) { + sec = int64(t - g1582ns100) + nsec = (sec % 10000000) * 100 + sec /= 10000000 + return sec, nsec +} + +// GetTime returns the current Time (100s of nanoseconds since 15 Oct 1582) and +// clock sequence as well as adjusting the clock sequence as needed. An error +// is returned if the current time cannot be determined. +func GetTime() (Time, uint16, error) { + defer timeMu.Unlock() + timeMu.Lock() + return getTime() +} + +func getTime() (Time, uint16, error) { + t := timeNow() + + // If we don't have a clock sequence already, set one. + if clockSeq == 0 { + setClockSequence(-1) + } + now := uint64(t.UnixNano()/100) + g1582ns100 + + // If time has gone backwards with this clock sequence then we + // increment the clock sequence + if now <= lasttime { + clockSeq = ((clockSeq + 1) & 0x3fff) | 0x8000 + } + lasttime = now + return Time(now), clockSeq, nil +} + +// ClockSequence returns the current clock sequence, generating one if not +// already set. The clock sequence is only used for Version 1 UUIDs. +// +// The uuid package does not use global static storage for the clock sequence or +// the last time a UUID was generated. Unless SetClockSequence is used, a new +// random clock sequence is generated the first time a clock sequence is +// requested by ClockSequence, GetTime, or NewUUID. (section 4.2.1.1) +func ClockSequence() int { + defer timeMu.Unlock() + timeMu.Lock() + return clockSequence() +} + +func clockSequence() int { + if clockSeq == 0 { + setClockSequence(-1) + } + return int(clockSeq & 0x3fff) +} + +// SetClockSequence sets the clock sequence to the lower 14 bits of seq. Setting to +// -1 causes a new sequence to be generated. +func SetClockSequence(seq int) { + defer timeMu.Unlock() + timeMu.Lock() + setClockSequence(seq) +} + +func setClockSequence(seq int) { + if seq == -1 { + var b [2]byte + randomBits(b[:]) // clock sequence + seq = int(b[0])<<8 | int(b[1]) + } + oldSeq := clockSeq + clockSeq = uint16(seq&0x3fff) | 0x8000 // Set our variant + if oldSeq != clockSeq { + lasttime = 0 + } +} + +// Time returns the time in 100s of nanoseconds since 15 Oct 1582 encoded in +// uuid. The time is only defined for version 1 and 2 UUIDs. 
+func (uuid UUID) Time() Time { + time := int64(binary.BigEndian.Uint32(uuid[0:4])) + time |= int64(binary.BigEndian.Uint16(uuid[4:6])) << 32 + time |= int64(binary.BigEndian.Uint16(uuid[6:8])&0xfff) << 48 + return Time(time) +} + +// ClockSequence returns the clock sequence encoded in uuid. +// The clock sequence is only well defined for version 1 and 2 UUIDs. +func (uuid UUID) ClockSequence() int { + return int(binary.BigEndian.Uint16(uuid[8:10])) & 0x3fff +} diff --git a/vendor/github.com/google/uuid/util.go b/vendor/github.com/google/uuid/util.go new file mode 100644 index 000000000..5ea6c7378 --- /dev/null +++ b/vendor/github.com/google/uuid/util.go @@ -0,0 +1,43 @@ +// Copyright 2016 Google Inc. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package uuid + +import ( + "io" +) + +// randomBits completely fills slice b with random data. +func randomBits(b []byte) { + if _, err := io.ReadFull(rander, b); err != nil { + panic(err.Error()) // rand should never fail + } +} + +// xvalues returns the value of a byte as a hexadecimal digit or 255. +var xvalues = [256]byte{ + 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, + 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, + 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, + 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 255, 255, 255, 255, 255, 255, + 255, 10, 11, 12, 13, 14, 15, 255, 255, 255, 255, 255, 255, 255, 255, 255, + 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, + 255, 10, 11, 12, 13, 14, 15, 255, 255, 255, 255, 255, 255, 255, 255, 255, + 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, + 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, + 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, + 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, + 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, + 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, + 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, + 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, + 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, +} + +// xtob converts hex characters x1 and x2 into a byte. +func xtob(x1, x2 byte) (byte, bool) { + b1 := xvalues[x1] + b2 := xvalues[x2] + return (b1 << 4) | b2, b1 != 255 && b2 != 255 +} diff --git a/vendor/github.com/google/uuid/uuid.go b/vendor/github.com/google/uuid/uuid.go new file mode 100644 index 000000000..a57207aeb --- /dev/null +++ b/vendor/github.com/google/uuid/uuid.go @@ -0,0 +1,294 @@ +// Copyright 2018 Google Inc. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package uuid + +import ( + "bytes" + "crypto/rand" + "encoding/hex" + "errors" + "fmt" + "io" + "strings" + "sync" +) + +// A UUID is a 128 bit (16 byte) Universal Unique IDentifier as defined in RFC +// 4122. +type UUID [16]byte + +// A Version represents a UUID's version. +type Version byte + +// A Variant represents a UUID's variant. +type Variant byte + +// Constants returned by Variant. 
+const ( + Invalid = Variant(iota) // Invalid UUID + RFC4122 // The variant specified in RFC4122 + Reserved // Reserved, NCS backward compatibility. + Microsoft // Reserved, Microsoft Corporation backward compatibility. + Future // Reserved for future definition. +) + +const randPoolSize = 16 * 16 + +var ( + rander = rand.Reader // random function + poolEnabled = false + poolMu sync.Mutex + poolPos = randPoolSize // protected with poolMu + pool [randPoolSize]byte // protected with poolMu +) + +type invalidLengthError struct{ len int } + +func (err invalidLengthError) Error() string { + return fmt.Sprintf("invalid UUID length: %d", err.len) +} + +// IsInvalidLengthError is matcher function for custom error invalidLengthError +func IsInvalidLengthError(err error) bool { + _, ok := err.(invalidLengthError) + return ok +} + +// Parse decodes s into a UUID or returns an error. Both the standard UUID +// forms of xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx and +// urn:uuid:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx are decoded as well as the +// Microsoft encoding {xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx} and the raw hex +// encoding: xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx. +func Parse(s string) (UUID, error) { + var uuid UUID + switch len(s) { + // xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx + case 36: + + // urn:uuid:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx + case 36 + 9: + if strings.ToLower(s[:9]) != "urn:uuid:" { + return uuid, fmt.Errorf("invalid urn prefix: %q", s[:9]) + } + s = s[9:] + + // {xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx} + case 36 + 2: + s = s[1:] + + // xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx + case 32: + var ok bool + for i := range uuid { + uuid[i], ok = xtob(s[i*2], s[i*2+1]) + if !ok { + return uuid, errors.New("invalid UUID format") + } + } + return uuid, nil + default: + return uuid, invalidLengthError{len(s)} + } + // s is now at least 36 bytes long + // it must be of the form xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx + if s[8] != '-' || s[13] != '-' || s[18] != '-' || s[23] != '-' { + return uuid, errors.New("invalid UUID format") + } + for i, x := range [16]int{ + 0, 2, 4, 6, + 9, 11, + 14, 16, + 19, 21, + 24, 26, 28, 30, 32, 34} { + v, ok := xtob(s[x], s[x+1]) + if !ok { + return uuid, errors.New("invalid UUID format") + } + uuid[i] = v + } + return uuid, nil +} + +// ParseBytes is like Parse, except it parses a byte slice instead of a string. 
+func ParseBytes(b []byte) (UUID, error) { + var uuid UUID + switch len(b) { + case 36: // xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx + case 36 + 9: // urn:uuid:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx + if !bytes.Equal(bytes.ToLower(b[:9]), []byte("urn:uuid:")) { + return uuid, fmt.Errorf("invalid urn prefix: %q", b[:9]) + } + b = b[9:] + case 36 + 2: // {xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx} + b = b[1:] + case 32: // xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx + var ok bool + for i := 0; i < 32; i += 2 { + uuid[i/2], ok = xtob(b[i], b[i+1]) + if !ok { + return uuid, errors.New("invalid UUID format") + } + } + return uuid, nil + default: + return uuid, invalidLengthError{len(b)} + } + // s is now at least 36 bytes long + // it must be of the form xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx + if b[8] != '-' || b[13] != '-' || b[18] != '-' || b[23] != '-' { + return uuid, errors.New("invalid UUID format") + } + for i, x := range [16]int{ + 0, 2, 4, 6, + 9, 11, + 14, 16, + 19, 21, + 24, 26, 28, 30, 32, 34} { + v, ok := xtob(b[x], b[x+1]) + if !ok { + return uuid, errors.New("invalid UUID format") + } + uuid[i] = v + } + return uuid, nil +} + +// MustParse is like Parse but panics if the string cannot be parsed. +// It simplifies safe initialization of global variables holding compiled UUIDs. +func MustParse(s string) UUID { + uuid, err := Parse(s) + if err != nil { + panic(`uuid: Parse(` + s + `): ` + err.Error()) + } + return uuid +} + +// FromBytes creates a new UUID from a byte slice. Returns an error if the slice +// does not have a length of 16. The bytes are copied from the slice. +func FromBytes(b []byte) (uuid UUID, err error) { + err = uuid.UnmarshalBinary(b) + return uuid, err +} + +// Must returns uuid if err is nil and panics otherwise. +func Must(uuid UUID, err error) UUID { + if err != nil { + panic(err) + } + return uuid +} + +// String returns the string form of uuid, xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx +// , or "" if uuid is invalid. +func (uuid UUID) String() string { + var buf [36]byte + encodeHex(buf[:], uuid) + return string(buf[:]) +} + +// URN returns the RFC 2141 URN form of uuid, +// urn:uuid:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx, or "" if uuid is invalid. +func (uuid UUID) URN() string { + var buf [36 + 9]byte + copy(buf[:], "urn:uuid:") + encodeHex(buf[9:], uuid) + return string(buf[:]) +} + +func encodeHex(dst []byte, uuid UUID) { + hex.Encode(dst, uuid[:4]) + dst[8] = '-' + hex.Encode(dst[9:13], uuid[4:6]) + dst[13] = '-' + hex.Encode(dst[14:18], uuid[6:8]) + dst[18] = '-' + hex.Encode(dst[19:23], uuid[8:10]) + dst[23] = '-' + hex.Encode(dst[24:], uuid[10:]) +} + +// Variant returns the variant encoded in uuid. +func (uuid UUID) Variant() Variant { + switch { + case (uuid[8] & 0xc0) == 0x80: + return RFC4122 + case (uuid[8] & 0xe0) == 0xc0: + return Microsoft + case (uuid[8] & 0xe0) == 0xe0: + return Future + default: + return Reserved + } +} + +// Version returns the version of uuid. +func (uuid UUID) Version() Version { + return Version(uuid[6] >> 4) +} + +func (v Version) String() string { + if v > 15 { + return fmt.Sprintf("BAD_VERSION_%d", v) + } + return fmt.Sprintf("VERSION_%d", v) +} + +func (v Variant) String() string { + switch v { + case RFC4122: + return "RFC4122" + case Reserved: + return "Reserved" + case Microsoft: + return "Microsoft" + case Future: + return "Future" + case Invalid: + return "Invalid" + } + return fmt.Sprintf("BadVariant%d", int(v)) +} + +// SetRand sets the random number generator to r, which implements io.Reader. 
+// If r.Read returns an error when the package requests random data then +// a panic will be issued. +// +// Calling SetRand with nil sets the random number generator to the default +// generator. +func SetRand(r io.Reader) { + if r == nil { + rander = rand.Reader + return + } + rander = r +} + +// EnableRandPool enables internal randomness pool used for Random +// (Version 4) UUID generation. The pool contains random bytes read from +// the random number generator on demand in batches. Enabling the pool +// may improve the UUID generation throughput significantly. +// +// Since the pool is stored on the Go heap, this feature may be a bad fit +// for security sensitive applications. +// +// Both EnableRandPool and DisableRandPool are not thread-safe and should +// only be called when there is no possibility that New or any other +// UUID Version 4 generation function will be called concurrently. +func EnableRandPool() { + poolEnabled = true +} + +// DisableRandPool disables the randomness pool if it was previously +// enabled with EnableRandPool. +// +// Both EnableRandPool and DisableRandPool are not thread-safe and should +// only be called when there is no possibility that New or any other +// UUID Version 4 generation function will be called concurrently. +func DisableRandPool() { + poolEnabled = false + defer poolMu.Unlock() + poolMu.Lock() + poolPos = randPoolSize +} diff --git a/vendor/github.com/google/uuid/version1.go b/vendor/github.com/google/uuid/version1.go new file mode 100644 index 000000000..463109629 --- /dev/null +++ b/vendor/github.com/google/uuid/version1.go @@ -0,0 +1,44 @@ +// Copyright 2016 Google Inc. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package uuid + +import ( + "encoding/binary" +) + +// NewUUID returns a Version 1 UUID based on the current NodeID and clock +// sequence, and the current time. If the NodeID has not been set by SetNodeID +// or SetNodeInterface then it will be set automatically. If the NodeID cannot +// be set NewUUID returns nil. If clock sequence has not been set by +// SetClockSequence then it will be set automatically. If GetTime fails to +// return the current NewUUID returns nil and an error. +// +// In most cases, New should be used. +func NewUUID() (UUID, error) { + var uuid UUID + now, seq, err := GetTime() + if err != nil { + return uuid, err + } + + timeLow := uint32(now & 0xffffffff) + timeMid := uint16((now >> 32) & 0xffff) + timeHi := uint16((now >> 48) & 0x0fff) + timeHi |= 0x1000 // Version 1 + + binary.BigEndian.PutUint32(uuid[0:], timeLow) + binary.BigEndian.PutUint16(uuid[4:], timeMid) + binary.BigEndian.PutUint16(uuid[6:], timeHi) + binary.BigEndian.PutUint16(uuid[8:], seq) + + nodeMu.Lock() + if nodeID == zeroID { + setNodeInterface("") + } + copy(uuid[10:], nodeID[:]) + nodeMu.Unlock() + + return uuid, nil +} diff --git a/vendor/github.com/google/uuid/version4.go b/vendor/github.com/google/uuid/version4.go new file mode 100644 index 000000000..7697802e4 --- /dev/null +++ b/vendor/github.com/google/uuid/version4.go @@ -0,0 +1,76 @@ +// Copyright 2016 Google Inc. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package uuid + +import "io" + +// New creates a new random UUID or panics. 
New is equivalent to +// the expression +// +// uuid.Must(uuid.NewRandom()) +func New() UUID { + return Must(NewRandom()) +} + +// NewString creates a new random UUID and returns it as a string or panics. +// NewString is equivalent to the expression +// +// uuid.New().String() +func NewString() string { + return Must(NewRandom()).String() +} + +// NewRandom returns a Random (Version 4) UUID. +// +// The strength of the UUIDs is based on the strength of the crypto/rand +// package. +// +// Uses the randomness pool if it was enabled with EnableRandPool. +// +// A note about uniqueness derived from the UUID Wikipedia entry: +// +// Randomly generated UUIDs have 122 random bits. One's annual risk of being +// hit by a meteorite is estimated to be one chance in 17 billion, that +// means the probability is about 0.00000000006 (6 × 10−11), +// equivalent to the odds of creating a few tens of trillions of UUIDs in a +// year and having one duplicate. +func NewRandom() (UUID, error) { + if !poolEnabled { + return NewRandomFromReader(rander) + } + return newRandomFromPool() +} + +// NewRandomFromReader returns a UUID based on bytes read from a given io.Reader. +func NewRandomFromReader(r io.Reader) (UUID, error) { + var uuid UUID + _, err := io.ReadFull(r, uuid[:]) + if err != nil { + return Nil, err + } + uuid[6] = (uuid[6] & 0x0f) | 0x40 // Version 4 + uuid[8] = (uuid[8] & 0x3f) | 0x80 // Variant is 10 + return uuid, nil +} + +func newRandomFromPool() (UUID, error) { + var uuid UUID + poolMu.Lock() + if poolPos == randPoolSize { + _, err := io.ReadFull(rander, pool[:]) + if err != nil { + poolMu.Unlock() + return Nil, err + } + poolPos = 0 + } + copy(uuid[:], pool[poolPos:(poolPos+16)]) + poolPos += 16 + poolMu.Unlock() + + uuid[6] = (uuid[6] & 0x0f) | 0x40 // Version 4 + uuid[8] = (uuid[8] & 0x3f) | 0x80 // Variant is 10 + return uuid, nil +} diff --git a/vendor/github.com/googleapis/enterprise-certificate-proxy/LICENSE b/vendor/github.com/googleapis/enterprise-certificate-proxy/LICENSE new file mode 100644 index 000000000..d64569567 --- /dev/null +++ b/vendor/github.com/googleapis/enterprise-certificate-proxy/LICENSE @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. 
+ + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/github.com/googleapis/enterprise-certificate-proxy/client/client.go b/vendor/github.com/googleapis/enterprise-certificate-proxy/client/client.go new file mode 100644 index 000000000..81f54d5ef --- /dev/null +++ b/vendor/github.com/googleapis/enterprise-certificate-proxy/client/client.go @@ -0,0 +1,151 @@ +// Copyright 2022 Google LLC. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. +// +// Client is a cross-platform client for the signer binary (a.k.a."EnterpriseCertSigner"). +// The signer binary is OS-specific, but exposes a standard set of APIs for the client to use. 
+package client + +import ( + "crypto" + "crypto/rsa" + "crypto/x509" + "encoding/gob" + "fmt" + "io" + "net/rpc" + "os" + "os/exec" + + "github.com/googleapis/enterprise-certificate-proxy/client/util" +) + +const signAPI = "EnterpriseCertSigner.Sign" +const certificateChainAPI = "EnterpriseCertSigner.CertificateChain" +const publicKeyAPI = "EnterpriseCertSigner.Public" + +// A Connection wraps a pair of unidirectional streams as an io.ReadWriteCloser. +type Connection struct { + io.ReadCloser + io.WriteCloser +} + +// Close closes c's underlying ReadCloser and WriteCloser. +func (c *Connection) Close() error { + rerr := c.ReadCloser.Close() + werr := c.WriteCloser.Close() + if rerr != nil { + return rerr + } + return werr +} + +func init() { + gob.Register(crypto.SHA256) + gob.Register(&rsa.PSSOptions{}) +} + +// SignArgs contains arguments to a crypto Signer.Sign method. +type SignArgs struct { + Digest []byte // The content to sign. + Opts crypto.SignerOpts // Options for signing, such as Hash identifier. +} + +// Key implements credential.Credential by holding the executed signer subprocess. +type Key struct { + cmd *exec.Cmd // Pointer to the signer subprocess. + client *rpc.Client // Pointer to the rpc client that communicates with the signer subprocess. + publicKey crypto.PublicKey // Public key of loaded certificate. + chain [][]byte // Certificate chain of loaded certificate. +} + +// CertificateChain returns the credential as a raw X509 cert chain. This contains the public key. +func (k *Key) CertificateChain() [][]byte { + return k.chain +} + +// Close closes the RPC connection and kills the signer subprocess. +// Call this to free up resources when the Key object is no longer needed. +func (k *Key) Close() error { + if err := k.client.Close(); err != nil { + return fmt.Errorf("failed to close RPC connection: %w", err) + } + if err := k.cmd.Process.Kill(); err != nil { + return fmt.Errorf("failed to kill signer process: %w", err) + } + if err := k.cmd.Wait(); err.Error() != "signal: killed" { + return fmt.Errorf("signer process was not killed: %w", err) + } + return nil +} + +// Public returns the public key for this Key. +func (k *Key) Public() crypto.PublicKey { + return k.publicKey +} + +// Sign signs a message by encrypting a message digest, using the specified signer options. +func (k *Key) Sign(_ io.Reader, digest []byte, opts crypto.SignerOpts) (signed []byte, err error) { + err = k.client.Call(signAPI, SignArgs{Digest: digest, Opts: opts}, &signed) + return +} + +// Cred spawns a signer subprocess that listens on stdin/stdout to perform certificate +// related operations, including signing messages with the private key. +// +// The signer binary path is read from the specified configFilePath, if provided. +// Otherwise, use the default config file path. +// +// The config file also specifies which certificate the signer should use. +func Cred(configFilePath string) (*Key, error) { + if configFilePath == "" { + configFilePath = util.GetDefaultConfigFilePath() + } + enterpriseCertSignerPath, err := util.LoadSignerBinaryPath(configFilePath) + if err != nil { + return nil, err + } + k := &Key{ + cmd: exec.Command(enterpriseCertSignerPath, configFilePath), + } + + // Redirect errors from subprocess to parent process. + k.cmd.Stderr = os.Stderr + + // RPC client will communicate with subprocess over stdin/stdout. 
+ kin, err := k.cmd.StdinPipe() + if err != nil { + return nil, err + } + kout, err := k.cmd.StdoutPipe() + if err != nil { + return nil, err + } + k.client = rpc.NewClient(&Connection{kout, kin}) + + if err := k.cmd.Start(); err != nil { + return nil, fmt.Errorf("starting enterprise cert signer subprocess: %w", err) + } + + if err := k.client.Call(certificateChainAPI, struct{}{}, &k.chain); err != nil { + return nil, fmt.Errorf("failed to retrieve certificate chain: %w", err) + } + + var publicKeyBytes []byte + if err := k.client.Call(publicKeyAPI, struct{}{}, &publicKeyBytes); err != nil { + return nil, fmt.Errorf("failed to retrieve public key: %w", err) + } + + publicKey, err := x509.ParsePKIXPublicKey(publicKeyBytes) + if err != nil { + return nil, fmt.Errorf("failed to parse public key: %w", err) + } + + var ok bool + k.publicKey, ok = publicKey.(crypto.PublicKey) + if !ok { + return nil, fmt.Errorf("invalid public key type: %T", publicKey) + } + + return k, nil +} diff --git a/vendor/github.com/googleapis/enterprise-certificate-proxy/client/util/util.go b/vendor/github.com/googleapis/enterprise-certificate-proxy/client/util/util.go new file mode 100644 index 000000000..6b5f2806e --- /dev/null +++ b/vendor/github.com/googleapis/enterprise-certificate-proxy/client/util/util.go @@ -0,0 +1,72 @@ +// Package util provides helper functions for the client. +package util + +import ( + "encoding/json" + "errors" + "io/ioutil" + "os" + "os/user" + "path/filepath" + "runtime" +) + +const configFileName = "enterprise_certificate_config.json" + +// EnterpriseCertificateConfig contains parameters for initializing signer. +type EnterpriseCertificateConfig struct { + Libs Libs `json:"libs"` +} + +// Libs specifies the locations of helper libraries. +type Libs struct { + SignerBinary string `json:"signer_binary"` +} + +// LoadSignerBinaryPath retrieves the path of the signer binary from the config file. +func LoadSignerBinaryPath(configFilePath string) (path string, err error) { + jsonFile, err := os.Open(configFilePath) + if err != nil { + return "", err + } + + byteValue, err := ioutil.ReadAll(jsonFile) + if err != nil { + return "", err + } + var config EnterpriseCertificateConfig + err = json.Unmarshal(byteValue, &config) + if err != nil { + return "", err + } + signerBinaryPath := config.Libs.SignerBinary + if signerBinaryPath == "" { + return "", errors.New("Signer binary path is missing.") + } + return signerBinaryPath, nil +} + +func guessHomeDir() string { + // Prefer $HOME over user.Current due to glibc bug: golang.org/issue/13470 + if v := os.Getenv("HOME"); v != "" { + return v + } + // Else, fall back to user.Current: + if u, err := user.Current(); err == nil { + return u.HomeDir + } + return "" +} + +func getDefaultConfigFileDirectory() (directory string) { + if runtime.GOOS == "windows" { + return filepath.Join(os.Getenv("APPDATA"), "gcloud") + } else { + return filepath.Join(guessHomeDir(), ".config/gcloud") + } +} + +// GetDefaultConfigFilePath returns the default path of the enterprise certificate config file created by gCloud. 
+func GetDefaultConfigFilePath() (path string) { + return filepath.Join(getDefaultConfigFileDirectory(), configFileName) +} diff --git a/vendor/github.com/googleapis/gax-go/v2/.release-please-manifest.json b/vendor/github.com/googleapis/gax-go/v2/.release-please-manifest.json new file mode 100644 index 000000000..0e643a05b --- /dev/null +++ b/vendor/github.com/googleapis/gax-go/v2/.release-please-manifest.json @@ -0,0 +1,3 @@ +{ + "v2": "2.4.0" +} diff --git a/vendor/github.com/googleapis/gax-go/v2/CHANGES.md b/vendor/github.com/googleapis/gax-go/v2/CHANGES.md new file mode 100644 index 000000000..b42ace44c --- /dev/null +++ b/vendor/github.com/googleapis/gax-go/v2/CHANGES.md @@ -0,0 +1,18 @@ +# Changelog + +## [2.4.0](https://github.com/googleapis/gax-go/compare/v2.3.0...v2.4.0) (2022-05-09) + + +### Features + +* **v2:** add OnHTTPCodes CallOption ([#188](https://github.com/googleapis/gax-go/issues/188)) ([ba7c534](https://github.com/googleapis/gax-go/commit/ba7c5348363ab6c33e1cee3c03c0be68a46ca07c)) + + +### Bug Fixes + +* **v2/apierror:** use errors.As in FromError ([#189](https://github.com/googleapis/gax-go/issues/189)) ([f30f05b](https://github.com/googleapis/gax-go/commit/f30f05be583828f4c09cca4091333ea88ff8d79e)) + + +### Miscellaneous Chores + +* **v2:** bump release-please processing ([#192](https://github.com/googleapis/gax-go/issues/192)) ([56172f9](https://github.com/googleapis/gax-go/commit/56172f971d1141d7687edaac053ad3470af76719)) diff --git a/vendor/github.com/googleapis/gax-go/v2/apierror/apierror.go b/vendor/github.com/googleapis/gax-go/v2/apierror/apierror.go index 869379da9..7d0128a0c 100644 --- a/vendor/github.com/googleapis/gax-go/v2/apierror/apierror.go +++ b/vendor/github.com/googleapis/gax-go/v2/apierror/apierror.go @@ -28,10 +28,11 @@ // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // Package apierror implements a wrapper error for parsing error details from -// API calls. Currently, only errors representing a gRPC status are supported. +// API calls. Both HTTP & gRPC status errors are supported. package apierror import ( + "errors" "fmt" "strings" @@ -215,7 +216,8 @@ func FromError(err error) (*APIError, bool) { ae := APIError{err: err} st, isStatus := status.FromError(err) - herr, isHTTPErr := err.(*googleapi.Error) + var herr *googleapi.Error + isHTTPErr := errors.As(err, &herr) switch { case isStatus: diff --git a/vendor/github.com/googleapis/gax-go/v2/apierror/internal/proto/error.pb.go b/vendor/github.com/googleapis/gax-go/v2/apierror/internal/proto/error.pb.go index 27b34c06e..7dd9b8373 100644 --- a/vendor/github.com/googleapis/gax-go/v2/apierror/internal/proto/error.pb.go +++ b/vendor/github.com/googleapis/gax-go/v2/apierror/internal/proto/error.pb.go @@ -14,9 +14,9 @@ // Code generated by protoc-gen-go. DO NOT EDIT. 
// versions: -// protoc-gen-go v1.27.1 +// protoc-gen-go v1.28.0 // protoc v3.15.8 -// source: error.proto +// source: apierror/internal/proto/error.proto package jsonerror @@ -55,7 +55,7 @@ type Error struct { func (x *Error) Reset() { *x = Error{} if protoimpl.UnsafeEnabled { - mi := &file_error_proto_msgTypes[0] + mi := &file_apierror_internal_proto_error_proto_msgTypes[0] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -68,7 +68,7 @@ func (x *Error) String() string { func (*Error) ProtoMessage() {} func (x *Error) ProtoReflect() protoreflect.Message { - mi := &file_error_proto_msgTypes[0] + mi := &file_apierror_internal_proto_error_proto_msgTypes[0] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -81,7 +81,7 @@ func (x *Error) ProtoReflect() protoreflect.Message { // Deprecated: Use Error.ProtoReflect.Descriptor instead. func (*Error) Descriptor() ([]byte, []int) { - return file_error_proto_rawDescGZIP(), []int{0} + return file_apierror_internal_proto_error_proto_rawDescGZIP(), []int{0} } func (x *Error) GetError() *Error_Status { @@ -112,7 +112,7 @@ type Error_Status struct { func (x *Error_Status) Reset() { *x = Error_Status{} if protoimpl.UnsafeEnabled { - mi := &file_error_proto_msgTypes[1] + mi := &file_apierror_internal_proto_error_proto_msgTypes[1] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -125,7 +125,7 @@ func (x *Error_Status) String() string { func (*Error_Status) ProtoMessage() {} func (x *Error_Status) ProtoReflect() protoreflect.Message { - mi := &file_error_proto_msgTypes[1] + mi := &file_apierror_internal_proto_error_proto_msgTypes[1] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -138,7 +138,7 @@ func (x *Error_Status) ProtoReflect() protoreflect.Message { // Deprecated: Use Error_Status.ProtoReflect.Descriptor instead. 
func (*Error_Status) Descriptor() ([]byte, []int) { - return file_error_proto_rawDescGZIP(), []int{0, 0} + return file_apierror_internal_proto_error_proto_rawDescGZIP(), []int{0, 0} } func (x *Error_Status) GetCode() int32 { @@ -169,53 +169,55 @@ func (x *Error_Status) GetDetails() []*anypb.Any { return nil } -var File_error_proto protoreflect.FileDescriptor +var File_apierror_internal_proto_error_proto protoreflect.FileDescriptor -var file_error_proto_rawDesc = []byte{ - 0x0a, 0x0b, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x05, 0x65, - 0x72, 0x72, 0x6f, 0x72, 0x1a, 0x19, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, - 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x61, 0x6e, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, - 0x15, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x72, 0x70, 0x63, 0x2f, 0x63, 0x6f, 0x64, 0x65, - 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0xc5, 0x01, 0x0a, 0x05, 0x45, 0x72, 0x72, 0x6f, 0x72, - 0x12, 0x29, 0x0a, 0x05, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, - 0x13, 0x2e, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x2e, 0x45, 0x72, 0x72, 0x6f, 0x72, 0x2e, 0x53, 0x74, - 0x61, 0x74, 0x75, 0x73, 0x52, 0x05, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x1a, 0x90, 0x01, 0x0a, 0x06, - 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x12, 0x0a, 0x04, 0x63, 0x6f, 0x64, 0x65, 0x18, 0x01, - 0x20, 0x01, 0x28, 0x05, 0x52, 0x04, 0x63, 0x6f, 0x64, 0x65, 0x12, 0x18, 0x0a, 0x07, 0x6d, 0x65, - 0x73, 0x73, 0x61, 0x67, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x6d, 0x65, 0x73, - 0x73, 0x61, 0x67, 0x65, 0x12, 0x28, 0x0a, 0x06, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x18, 0x04, - 0x20, 0x01, 0x28, 0x0e, 0x32, 0x10, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x72, 0x70, - 0x63, 0x2e, 0x43, 0x6f, 0x64, 0x65, 0x52, 0x06, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x2e, - 0x0a, 0x07, 0x64, 0x65, 0x74, 0x61, 0x69, 0x6c, 0x73, 0x18, 0x05, 0x20, 0x03, 0x28, 0x0b, 0x32, - 0x14, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, - 0x66, 0x2e, 0x41, 0x6e, 0x79, 0x52, 0x07, 0x64, 0x65, 0x74, 0x61, 0x69, 0x6c, 0x73, 0x42, 0x43, - 0x5a, 0x41, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x67, 0x6f, 0x6f, - 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2f, 0x67, 0x61, 0x78, 0x2d, 0x67, 0x6f, 0x2f, 0x76, - 0x32, 0x2f, 0x61, 0x70, 0x69, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x2f, 0x69, 0x6e, 0x74, 0x65, 0x72, - 0x6e, 0x61, 0x6c, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x3b, 0x6a, 0x73, 0x6f, 0x6e, 0x65, 0x72, - 0x72, 0x6f, 0x72, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, +var file_apierror_internal_proto_error_proto_rawDesc = []byte{ + 0x0a, 0x23, 0x61, 0x70, 0x69, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x2f, 0x69, 0x6e, 0x74, 0x65, 0x72, + 0x6e, 0x61, 0x6c, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x2e, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x05, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x1a, 0x19, 0x67, 0x6f, + 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x61, 0x6e, + 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x15, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, + 0x72, 0x70, 0x63, 0x2f, 0x63, 0x6f, 0x64, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0xc5, + 0x01, 0x0a, 0x05, 0x45, 0x72, 0x72, 0x6f, 0x72, 0x12, 0x29, 0x0a, 0x05, 0x65, 0x72, 0x72, 0x6f, + 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x13, 0x2e, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x2e, + 0x45, 0x72, 0x72, 0x6f, 0x72, 0x2e, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x05, 0x65, 0x72, + 0x72, 
0x6f, 0x72, 0x1a, 0x90, 0x01, 0x0a, 0x06, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x12, + 0x0a, 0x04, 0x63, 0x6f, 0x64, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x05, 0x52, 0x04, 0x63, 0x6f, + 0x64, 0x65, 0x12, 0x18, 0x0a, 0x07, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x18, 0x02, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x07, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x12, 0x28, 0x0a, 0x06, + 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x10, 0x2e, 0x67, + 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x72, 0x70, 0x63, 0x2e, 0x43, 0x6f, 0x64, 0x65, 0x52, 0x06, + 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x2e, 0x0a, 0x07, 0x64, 0x65, 0x74, 0x61, 0x69, 0x6c, + 0x73, 0x18, 0x05, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, + 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x41, 0x6e, 0x79, 0x52, 0x07, 0x64, + 0x65, 0x74, 0x61, 0x69, 0x6c, 0x73, 0x42, 0x43, 0x5a, 0x41, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, + 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2f, + 0x67, 0x61, 0x78, 0x2d, 0x67, 0x6f, 0x2f, 0x76, 0x32, 0x2f, 0x61, 0x70, 0x69, 0x65, 0x72, 0x72, + 0x6f, 0x72, 0x2f, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x2f, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x3b, 0x6a, 0x73, 0x6f, 0x6e, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x62, 0x06, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x33, } var ( - file_error_proto_rawDescOnce sync.Once - file_error_proto_rawDescData = file_error_proto_rawDesc + file_apierror_internal_proto_error_proto_rawDescOnce sync.Once + file_apierror_internal_proto_error_proto_rawDescData = file_apierror_internal_proto_error_proto_rawDesc ) -func file_error_proto_rawDescGZIP() []byte { - file_error_proto_rawDescOnce.Do(func() { - file_error_proto_rawDescData = protoimpl.X.CompressGZIP(file_error_proto_rawDescData) +func file_apierror_internal_proto_error_proto_rawDescGZIP() []byte { + file_apierror_internal_proto_error_proto_rawDescOnce.Do(func() { + file_apierror_internal_proto_error_proto_rawDescData = protoimpl.X.CompressGZIP(file_apierror_internal_proto_error_proto_rawDescData) }) - return file_error_proto_rawDescData + return file_apierror_internal_proto_error_proto_rawDescData } -var file_error_proto_msgTypes = make([]protoimpl.MessageInfo, 2) -var file_error_proto_goTypes = []interface{}{ +var file_apierror_internal_proto_error_proto_msgTypes = make([]protoimpl.MessageInfo, 2) +var file_apierror_internal_proto_error_proto_goTypes = []interface{}{ (*Error)(nil), // 0: error.Error (*Error_Status)(nil), // 1: error.Error.Status (code.Code)(0), // 2: google.rpc.Code (*anypb.Any)(nil), // 3: google.protobuf.Any } -var file_error_proto_depIdxs = []int32{ +var file_apierror_internal_proto_error_proto_depIdxs = []int32{ 1, // 0: error.Error.error:type_name -> error.Error.Status 2, // 1: error.Error.Status.status:type_name -> google.rpc.Code 3, // 2: error.Error.Status.details:type_name -> google.protobuf.Any @@ -226,13 +228,13 @@ var file_error_proto_depIdxs = []int32{ 0, // [0:3] is the sub-list for field type_name } -func init() { file_error_proto_init() } -func file_error_proto_init() { - if File_error_proto != nil { +func init() { file_apierror_internal_proto_error_proto_init() } +func file_apierror_internal_proto_error_proto_init() { + if File_apierror_internal_proto_error_proto != nil { return } if !protoimpl.UnsafeEnabled { - file_error_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + file_apierror_internal_proto_error_proto_msgTypes[0].Exporter = func(v 
interface{}, i int) interface{} { switch v := v.(*Error); i { case 0: return &v.state @@ -244,7 +246,7 @@ func file_error_proto_init() { return nil } } - file_error_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { + file_apierror_internal_proto_error_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*Error_Status); i { case 0: return &v.state @@ -261,18 +263,18 @@ func file_error_proto_init() { out := protoimpl.TypeBuilder{ File: protoimpl.DescBuilder{ GoPackagePath: reflect.TypeOf(x{}).PkgPath(), - RawDescriptor: file_error_proto_rawDesc, + RawDescriptor: file_apierror_internal_proto_error_proto_rawDesc, NumEnums: 0, NumMessages: 2, NumExtensions: 0, NumServices: 0, }, - GoTypes: file_error_proto_goTypes, - DependencyIndexes: file_error_proto_depIdxs, - MessageInfos: file_error_proto_msgTypes, + GoTypes: file_apierror_internal_proto_error_proto_goTypes, + DependencyIndexes: file_apierror_internal_proto_error_proto_depIdxs, + MessageInfos: file_apierror_internal_proto_error_proto_msgTypes, }.Build() - File_error_proto = out.File - file_error_proto_rawDesc = nil - file_error_proto_goTypes = nil - file_error_proto_depIdxs = nil + File_apierror_internal_proto_error_proto = out.File + file_apierror_internal_proto_error_proto_rawDesc = nil + file_apierror_internal_proto_error_proto_goTypes = nil + file_apierror_internal_proto_error_proto_depIdxs = nil } diff --git a/vendor/github.com/googleapis/gax-go/v2/call_option.go b/vendor/github.com/googleapis/gax-go/v2/call_option.go index 425a7668d..e09200556 100644 --- a/vendor/github.com/googleapis/gax-go/v2/call_option.go +++ b/vendor/github.com/googleapis/gax-go/v2/call_option.go @@ -30,9 +30,11 @@ package gax import ( + "errors" "math/rand" "time" + "google.golang.org/api/googleapi" "google.golang.org/grpc" "google.golang.org/grpc/codes" "google.golang.org/grpc/status" @@ -119,6 +121,41 @@ func (r *boRetryer) Retry(err error) (time.Duration, bool) { return 0, false } +// OnHTTPCodes returns a Retryer that retries if and only if +// the previous attempt returns a googleapi.Error whose status code is stored in +// cc. Pause times between retries are specified by bo. +// +// bo is only used for its parameters; each Retryer has its own copy. +func OnHTTPCodes(bo Backoff, cc ...int) Retryer { + codes := make(map[int]bool, len(cc)) + for _, c := range cc { + codes[c] = true + } + + return &httpRetryer{ + backoff: bo, + codes: codes, + } +} + +type httpRetryer struct { + backoff Backoff + codes map[int]bool +} + +func (r *httpRetryer) Retry(err error) (time.Duration, bool) { + var gerr *googleapi.Error + if !errors.As(err, &gerr) { + return 0, false + } + + if r.codes[gerr.Code] { + return r.backoff.Pause(), true + } + + return 0, false +} + // Backoff implements exponential backoff. The wait time between retries is a // random value between 0 and the "retry period" - the time between retries. The // retry period starts at Initial and increases by the factor of Multiplier @@ -173,6 +210,21 @@ func (o grpcOpt) Resolve(s *CallSettings) { s.GRPC = o } +type pathOpt struct { + p string +} + +func (p pathOpt) Resolve(s *CallSettings) { + s.Path = p.p +} + +// WithPath applies a Path override to the HTTP-based APICall. +// +// This is for internal use only. +func WithPath(p string) CallOption { + return &pathOpt{p: p} +} + // WithGRPCOptions allows passing gRPC call options during client creation. 
func WithGRPCOptions(opt ...grpc.CallOption) CallOption { return grpcOpt(append([]grpc.CallOption(nil), opt...)) @@ -186,4 +238,7 @@ type CallSettings struct { // CallOptions to be forwarded to GRPC. GRPC []grpc.CallOption + + // Path is an HTTP override for an APICall. + Path string } diff --git a/vendor/github.com/googleapis/gax-go/v2/gax.go b/vendor/github.com/googleapis/gax-go/v2/gax.go index f634b4372..36cdfa33e 100644 --- a/vendor/github.com/googleapis/gax-go/v2/gax.go +++ b/vendor/github.com/googleapis/gax-go/v2/gax.go @@ -35,5 +35,7 @@ // to simplify code generation and to provide more convenient and idiomatic API surfaces. package gax +import "github.com/googleapis/gax-go/v2/internal" + // Version specifies the gax-go version being used. -const Version = "2.1.1" +const Version = internal.Version diff --git a/vendor/github.com/googleapis/gax-go/v2/internal/version.go b/vendor/github.com/googleapis/gax-go/v2/internal/version.go new file mode 100644 index 000000000..bf272a504 --- /dev/null +++ b/vendor/github.com/googleapis/gax-go/v2/internal/version.go @@ -0,0 +1,33 @@ +// Copyright 2022, Google Inc. +// All rights reserved. +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +package internal + +// Version is the current tagged release of the library. +const Version = "2.4.0" diff --git a/vendor/github.com/googleapis/gax-go/v2/proto_json_stream.go b/vendor/github.com/googleapis/gax-go/v2/proto_json_stream.go new file mode 100644 index 000000000..cc4486eb9 --- /dev/null +++ b/vendor/github.com/googleapis/gax-go/v2/proto_json_stream.go @@ -0,0 +1,126 @@ +// Copyright 2022, Google Inc. +// All rights reserved. +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. 
+// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +package gax + +import ( + "encoding/json" + "errors" + "io" + + "google.golang.org/protobuf/encoding/protojson" + "google.golang.org/protobuf/proto" + "google.golang.org/protobuf/reflect/protoreflect" +) + +var ( + arrayOpen = json.Delim('[') + arrayClose = json.Delim(']') + errBadOpening = errors.New("unexpected opening token, expected '['") +) + +// ProtoJSONStream represents a wrapper for consuming a stream of protobuf +// messages encoded using protobuf-JSON format. More information on this format +// can be found at https://developers.google.com/protocol-buffers/docs/proto3#json. +// The stream must appear as a comma-delimited, JSON array of obbjects with +// opening and closing square braces. +// +// This is for internal use only. +type ProtoJSONStream struct { + first, closed bool + reader io.ReadCloser + stream *json.Decoder + typ protoreflect.MessageType +} + +// NewProtoJSONStreamReader accepts a stream of bytes via an io.ReadCloser that are +// protobuf-JSON encoded protobuf messages of the given type. The ProtoJSONStream +// must be closed when done. +// +// This is for internal use only. +func NewProtoJSONStreamReader(rc io.ReadCloser, typ protoreflect.MessageType) *ProtoJSONStream { + return &ProtoJSONStream{ + first: true, + reader: rc, + stream: json.NewDecoder(rc), + typ: typ, + } +} + +// Recv decodes the next protobuf message in the stream or returns io.EOF if +// the stream is done. It is not safe to call Recv on the same stream from +// different goroutines, just like it is not safe to do so with a single gRPC +// stream. Type-cast the protobuf message returned to the type provided at +// ProtoJSONStream creation. +// Calls to Recv after calling Close will produce io.EOF. +func (s *ProtoJSONStream) Recv() (proto.Message, error) { + if s.closed { + return nil, io.EOF + } + if s.first { + s.first = false + + // Consume the opening '[' so Decode gets one object at a time. + if t, err := s.stream.Token(); err != nil { + return nil, err + } else if t != arrayOpen { + return nil, errBadOpening + } + } + + // Capture the next block of data for the item (a JSON object) in the stream. 
+ var raw json.RawMessage + if err := s.stream.Decode(&raw); err != nil { + e := err + // To avoid checking the first token of each stream, just attempt to + // Decode the next blob and if that fails, double check if it is just + // the closing token ']'. If it is the closing, return io.EOF. If it + // isn't, return the original error. + if t, _ := s.stream.Token(); t == arrayClose { + e = io.EOF + } + return nil, e + } + + // Initialize a new instance of the protobuf message to unmarshal the + // raw data into. + m := s.typ.New().Interface() + err := protojson.Unmarshal(raw, m) + + return m, err +} + +// Close closes the stream so that resources are cleaned up. +func (s *ProtoJSONStream) Close() error { + // Dereference the *json.Decoder so that the memory is gc'd. + s.stream = nil + s.closed = true + + return s.reader.Close() +} diff --git a/vendor/github.com/googleapis/gax-go/v2/release-please-config.json b/vendor/github.com/googleapis/gax-go/v2/release-please-config.json new file mode 100644 index 000000000..61ee266a1 --- /dev/null +++ b/vendor/github.com/googleapis/gax-go/v2/release-please-config.json @@ -0,0 +1,10 @@ +{ + "release-type": "go-yoshi", + "separate-pull-requests": true, + "include-component-in-tag": false, + "packages": { + "v2": { + "component": "v2" + } + } +} diff --git a/vendor/golang.org/x/net/bpf/doc.go b/vendor/golang.org/x/net/bpf/doc.go index ae62feb53..04ec1c8ab 100644 --- a/vendor/golang.org/x/net/bpf/doc.go +++ b/vendor/golang.org/x/net/bpf/doc.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. /* - Package bpf implements marshaling and unmarshaling of programs for the Berkeley Packet Filter virtual machine, and provides a Go implementation of the virtual machine. @@ -21,7 +20,7 @@ access to kernel functions, and while conditional branches are allowed, they can only jump forwards, to guarantee that there are no infinite loops. -The virtual machine +# The virtual machine The BPF VM is an accumulator machine. Its main register, called register A, is an implicit source and destination in all arithmetic @@ -50,7 +49,7 @@ to extensions, which are essentially calls to kernel utility functions. Currently, the only extensions supported by this package are the Linux packet filter extensions. -Examples +# Examples This packet filter selects all ARP packets. @@ -77,6 +76,5 @@ This packet filter captures a random 1% sample of traffic. // Ignore. bpf.RetConstant{Val: 0}, }) - */ package bpf // import "golang.org/x/net/bpf" diff --git a/vendor/golang.org/x/net/context/context.go b/vendor/golang.org/x/net/context/context.go index a3c021d3f..cf66309c4 100644 --- a/vendor/golang.org/x/net/context/context.go +++ b/vendor/golang.org/x/net/context/context.go @@ -21,9 +21,9 @@ // explicitly to each function that needs it. The Context should be the first // parameter, typically named ctx: // -// func DoSomething(ctx context.Context, arg Arg) error { -// // ... use ctx ... -// } +// func DoSomething(ctx context.Context, arg Arg) error { +// // ... use ctx ... +// } // // Do not pass a nil Context, even if a function permits it. Pass context.TODO // if you are unsure about which Context to use. 
diff --git a/vendor/golang.org/x/net/context/go17.go b/vendor/golang.org/x/net/context/go17.go index 344bd1433..0a54bdbcc 100644 --- a/vendor/golang.org/x/net/context/go17.go +++ b/vendor/golang.org/x/net/context/go17.go @@ -54,11 +54,11 @@ func WithDeadline(parent Context, deadline time.Time) (Context, CancelFunc) { // Canceling this context releases resources associated with it, so code should // call cancel as soon as the operations running in this Context complete: // -// func slowOperationWithTimeout(ctx context.Context) (Result, error) { -// ctx, cancel := context.WithTimeout(ctx, 100*time.Millisecond) -// defer cancel() // releases resources if slowOperation completes before timeout elapses -// return slowOperation(ctx) -// } +// func slowOperationWithTimeout(ctx context.Context) (Result, error) { +// ctx, cancel := context.WithTimeout(ctx, 100*time.Millisecond) +// defer cancel() // releases resources if slowOperation completes before timeout elapses +// return slowOperation(ctx) +// } func WithTimeout(parent Context, timeout time.Duration) (Context, CancelFunc) { return WithDeadline(parent, time.Now().Add(timeout)) } diff --git a/vendor/golang.org/x/net/context/pre_go17.go b/vendor/golang.org/x/net/context/pre_go17.go index 5270db5db..7b6b68511 100644 --- a/vendor/golang.org/x/net/context/pre_go17.go +++ b/vendor/golang.org/x/net/context/pre_go17.go @@ -264,11 +264,11 @@ func (c *timerCtx) cancel(removeFromParent bool, err error) { // Canceling this context releases resources associated with it, so code should // call cancel as soon as the operations running in this Context complete: // -// func slowOperationWithTimeout(ctx context.Context) (Result, error) { -// ctx, cancel := context.WithTimeout(ctx, 100*time.Millisecond) -// defer cancel() // releases resources if slowOperation completes before timeout elapses -// return slowOperation(ctx) -// } +// func slowOperationWithTimeout(ctx context.Context) (Result, error) { +// ctx, cancel := context.WithTimeout(ctx, 100*time.Millisecond) +// defer cancel() // releases resources if slowOperation completes before timeout elapses +// return slowOperation(ctx) +// } func WithTimeout(parent Context, timeout time.Duration) (Context, CancelFunc) { return WithDeadline(parent, time.Now().Add(timeout)) } diff --git a/vendor/golang.org/x/net/http/httpguts/httplex.go b/vendor/golang.org/x/net/http/httpguts/httplex.go index c79aa73f2..6e071e852 100644 --- a/vendor/golang.org/x/net/http/httpguts/httplex.go +++ b/vendor/golang.org/x/net/http/httpguts/httplex.go @@ -173,13 +173,15 @@ func tokenEqual(t1, t2 string) bool { // isLWS reports whether b is linear white space, according // to http://www.w3.org/Protocols/rfc2616/rfc2616-sec2.html#sec2.2 -// LWS = [CRLF] 1*( SP | HT ) +// +// LWS = [CRLF] 1*( SP | HT ) func isLWS(b byte) bool { return b == ' ' || b == '\t' } // isCTL reports whether b is a control byte, according // to http://www.w3.org/Protocols/rfc2616/rfc2616-sec2.html#sec2.2 -// CTL = +// +// CTL = func isCTL(b byte) bool { const del = 0x7f // a CTL return b < ' ' || b == del @@ -189,12 +191,13 @@ func isCTL(b byte) bool { // HTTP/2 imposes the additional restriction that uppercase ASCII // letters are not allowed. // -// RFC 7230 says: -// header-field = field-name ":" OWS field-value OWS -// field-name = token -// token = 1*tchar -// tchar = "!" / "#" / "$" / "%" / "&" / "'" / "*" / "+" / "-" / "." 
/ -// "^" / "_" / "`" / "|" / "~" / DIGIT / ALPHA +// RFC 7230 says: +// +// header-field = field-name ":" OWS field-value OWS +// field-name = token +// token = 1*tchar +// tchar = "!" / "#" / "$" / "%" / "&" / "'" / "*" / "+" / "-" / "." / +// "^" / "_" / "`" / "|" / "~" / DIGIT / ALPHA func ValidHeaderFieldName(v string) bool { if len(v) == 0 { return false @@ -267,27 +270,28 @@ var validHostByte = [256]bool{ // ValidHeaderFieldValue reports whether v is a valid "field-value" according to // http://www.w3.org/Protocols/rfc2616/rfc2616-sec4.html#sec4.2 : // -// message-header = field-name ":" [ field-value ] -// field-value = *( field-content | LWS ) -// field-content = +// message-header = field-name ":" [ field-value ] +// field-value = *( field-content | LWS ) +// field-content = // // http://www.w3.org/Protocols/rfc2616/rfc2616-sec2.html#sec2.2 : // -// TEXT = -// LWS = [CRLF] 1*( SP | HT ) -// CTL = +// TEXT = +// LWS = [CRLF] 1*( SP | HT ) +// CTL = // // RFC 7230 says: -// field-value = *( field-content / obs-fold ) -// obj-fold = N/A to http2, and deprecated -// field-content = field-vchar [ 1*( SP / HTAB ) field-vchar ] -// field-vchar = VCHAR / obs-text -// obs-text = %x80-FF -// VCHAR = "any visible [USASCII] character" +// +// field-value = *( field-content / obs-fold ) +// obj-fold = N/A to http2, and deprecated +// field-content = field-vchar [ 1*( SP / HTAB ) field-vchar ] +// field-vchar = VCHAR / obs-text +// obs-text = %x80-FF +// VCHAR = "any visible [USASCII] character" // // http2 further says: "Similarly, HTTP/2 allows header field values // that are not valid. While most of the values that can be encoded diff --git a/vendor/golang.org/x/net/http2/client_conn_pool.go b/vendor/golang.org/x/net/http2/client_conn_pool.go index c936843ea..780968d6c 100644 --- a/vendor/golang.org/x/net/http2/client_conn_pool.go +++ b/vendor/golang.org/x/net/http2/client_conn_pool.go @@ -139,7 +139,6 @@ func (p *clientConnPool) getStartDialLocked(ctx context.Context, addr string) *d func (c *dialCall) dial(ctx context.Context, addr string) { const singleUse = false // shared conn c.res, c.err = c.p.t.dialClientConn(ctx, addr, singleUse) - close(c.done) c.p.mu.Lock() delete(c.p.dialing, addr) @@ -147,6 +146,8 @@ func (c *dialCall) dial(ctx context.Context, addr string) { c.p.addConnLocked(addr, c.res) } c.p.mu.Unlock() + + close(c.done) } // addConnIfNeeded makes a NewClientConn out of c if a connection for key doesn't diff --git a/vendor/golang.org/x/net/http2/errors.go b/vendor/golang.org/x/net/http2/errors.go index 2663e5d28..f2067dabc 100644 --- a/vendor/golang.org/x/net/http2/errors.go +++ b/vendor/golang.org/x/net/http2/errors.go @@ -136,7 +136,7 @@ func (e headerFieldNameError) Error() string { type headerFieldValueError string func (e headerFieldValueError) Error() string { - return fmt.Sprintf("invalid header field value %q", string(e)) + return fmt.Sprintf("invalid header field value for %q", string(e)) } var ( diff --git a/vendor/golang.org/x/net/http2/frame.go b/vendor/golang.org/x/net/http2/frame.go index 96a747905..0178647ee 100644 --- a/vendor/golang.org/x/net/http2/frame.go +++ b/vendor/golang.org/x/net/http2/frame.go @@ -1532,7 +1532,8 @@ func (fr *Framer) readMetaFrame(hf *HeadersFrame) (*MetaHeadersFrame, error) { fr.debugReadLoggerf("http2: decoded hpack field %+v", hf) } if !httpguts.ValidHeaderFieldValue(hf.Value) { - invalid = headerFieldValueError(hf.Value) + // Don't include the value in the error, because it may be sensitive. 
+ invalid = headerFieldValueError(hf.Name) } isPseudo := strings.HasPrefix(hf.Name, ":") if isPseudo { diff --git a/vendor/golang.org/x/net/http2/hpack/huffman.go b/vendor/golang.org/x/net/http2/hpack/huffman.go index fe0b84ccd..20d083a71 100644 --- a/vendor/golang.org/x/net/http2/hpack/huffman.go +++ b/vendor/golang.org/x/net/http2/hpack/huffman.go @@ -169,25 +169,50 @@ func buildRootHuffmanNode() { // AppendHuffmanString appends s, as encoded in Huffman codes, to dst // and returns the extended buffer. func AppendHuffmanString(dst []byte, s string) []byte { - rembits := uint8(8) - + // This relies on the maximum huffman code length being 30 (See tables.go huffmanCodeLen array) + // So if a uint64 buffer has less than 32 valid bits can always accommodate another huffmanCode. + var ( + x uint64 // buffer + n uint // number valid of bits present in x + ) for i := 0; i < len(s); i++ { - if rembits == 8 { - dst = append(dst, 0) + c := s[i] + n += uint(huffmanCodeLen[c]) + x <<= huffmanCodeLen[c] % 64 + x |= uint64(huffmanCodes[c]) + if n >= 32 { + n %= 32 // Normally would be -= 32 but %= 32 informs compiler 0 <= n <= 31 for upcoming shift + y := uint32(x >> n) // Compiler doesn't combine memory writes if y isn't uint32 + dst = append(dst, byte(y>>24), byte(y>>16), byte(y>>8), byte(y)) } - dst, rembits = appendByteToHuffmanCode(dst, rembits, s[i]) } - - if rembits < 8 { - // special EOS symbol - code := uint32(0x3fffffff) - nbits := uint8(30) - - t := uint8(code >> (nbits - rembits)) - dst[len(dst)-1] |= t + // Add padding bits if necessary + if over := n % 8; over > 0 { + const ( + eosCode = 0x3fffffff + eosNBits = 30 + eosPadByte = eosCode >> (eosNBits - 8) + ) + pad := 8 - over + x = (x << pad) | (eosPadByte >> over) + n += pad // 8 now divides into n exactly } - - return dst + // n in (0, 8, 16, 24, 32) + switch n / 8 { + case 0: + return dst + case 1: + return append(dst, byte(x)) + case 2: + y := uint16(x) + return append(dst, byte(y>>8), byte(y)) + case 3: + y := uint16(x >> 8) + return append(dst, byte(y>>8), byte(y), byte(x)) + } + // case 4: + y := uint32(x) + return append(dst, byte(y>>24), byte(y>>16), byte(y>>8), byte(y)) } // HuffmanEncodeLength returns the number of bytes required to encode @@ -199,35 +224,3 @@ func HuffmanEncodeLength(s string) uint64 { } return (n + 7) / 8 } - -// appendByteToHuffmanCode appends Huffman code for c to dst and -// returns the extended buffer and the remaining bits in the last -// element. The appending is not byte aligned and the remaining bits -// in the last element of dst is given in rembits. -func appendByteToHuffmanCode(dst []byte, rembits uint8, c byte) ([]byte, uint8) { - code := huffmanCodes[c] - nbits := huffmanCodeLen[c] - - for { - if rembits > nbits { - t := uint8(code << (rembits - nbits)) - dst[len(dst)-1] |= t - rembits -= nbits - break - } - - t := uint8(code >> (nbits - rembits)) - dst[len(dst)-1] |= t - - nbits -= rembits - rembits = 8 - - if nbits == 0 { - break - } - - dst = append(dst, 0) - } - - return dst, rembits -} diff --git a/vendor/golang.org/x/net/http2/http2.go b/vendor/golang.org/x/net/http2/http2.go index 5571ccfd2..479ba4b2b 100644 --- a/vendor/golang.org/x/net/http2/http2.go +++ b/vendor/golang.org/x/net/http2/http2.go @@ -13,7 +13,6 @@ // See https://http2.github.io/ for more information on HTTP/2. // // See https://http2.golang.org/ for a test server running this code. -// package http2 // import "golang.org/x/net/http2" import ( @@ -176,10 +175,11 @@ func (s SettingID) String() string { // name (key). 
See httpguts.ValidHeaderName for the base rules. // // Further, http2 says: -// "Just as in HTTP/1.x, header field names are strings of ASCII -// characters that are compared in a case-insensitive -// fashion. However, header field names MUST be converted to -// lowercase prior to their encoding in HTTP/2. " +// +// "Just as in HTTP/1.x, header field names are strings of ASCII +// characters that are compared in a case-insensitive +// fashion. However, header field names MUST be converted to +// lowercase prior to their encoding in HTTP/2. " func validWireHeaderFieldName(v string) bool { if len(v) == 0 { return false @@ -365,8 +365,8 @@ func (s *sorter) SortStrings(ss []string) { // validPseudoPath reports whether v is a valid :path pseudo-header // value. It must be either: // -// *) a non-empty string starting with '/' -// *) the string '*', for OPTIONS requests. +// - a non-empty string starting with '/' +// - the string '*', for OPTIONS requests. // // For now this is only used a quick check for deciding when to clean // up Opaque URLs before sending requests from the Transport. diff --git a/vendor/golang.org/x/net/http2/server.go b/vendor/golang.org/x/net/http2/server.go index e644d9b2f..47524a61a 100644 --- a/vendor/golang.org/x/net/http2/server.go +++ b/vendor/golang.org/x/net/http2/server.go @@ -315,6 +315,20 @@ type ServeConnOpts struct { // requests. If nil, BaseConfig.Handler is used. If BaseConfig // or BaseConfig.Handler is nil, http.DefaultServeMux is used. Handler http.Handler + + // UpgradeRequest is an initial request received on a connection + // undergoing an h2c upgrade. The request body must have been + // completely read from the connection before calling ServeConn, + // and the 101 Switching Protocols response written. + UpgradeRequest *http.Request + + // Settings is the decoded contents of the HTTP2-Settings header + // in an h2c upgrade request. + Settings []byte + + // SawClientPreface is set if the HTTP/2 connection preface + // has already been read from the connection. + SawClientPreface bool } func (o *ServeConnOpts) context() context.Context { @@ -383,6 +397,7 @@ func (s *Server) ServeConn(c net.Conn, opts *ServeConnOpts) { headerTableSize: initialHeaderTableSize, serveG: newGoroutineLock(), pushEnabled: true, + sawClientPreface: opts.SawClientPreface, } s.state.registerConn(sc) @@ -400,7 +415,7 @@ func (s *Server) ServeConn(c net.Conn, opts *ServeConnOpts) { if s.NewWriteScheduler != nil { sc.writeSched = s.NewWriteScheduler() } else { - sc.writeSched = NewRandomWriteScheduler() + sc.writeSched = NewPriorityWriteScheduler(nil) } // These start at the RFC-specified defaults. 
If there is a higher @@ -465,9 +480,27 @@ func (s *Server) ServeConn(c net.Conn, opts *ServeConnOpts) { } } + if opts.Settings != nil { + fr := &SettingsFrame{ + FrameHeader: FrameHeader{valid: true}, + p: opts.Settings, + } + if err := fr.ForeachSetting(sc.processSetting); err != nil { + sc.rejectConn(ErrCodeProtocol, "invalid settings") + return + } + opts.Settings = nil + } + if hook := testHookGetServerConn; hook != nil { hook(sc) } + + if opts.UpgradeRequest != nil { + sc.upgradeRequest(opts.UpgradeRequest) + opts.UpgradeRequest = nil + } + sc.serve() } @@ -512,6 +545,7 @@ type serverConn struct { // Everything following is owned by the serve loop; use serveG.check(): serveG goroutineLock // used to verify funcs are on serve() pushEnabled bool + sawClientPreface bool // preface has already been read, used in h2c upgrade sawFirstSettings bool // got the initial SETTINGS frame after the preface needToSendSettingsAck bool unackedSettings int // how many SETTINGS have we sent without ACKs? @@ -974,6 +1008,9 @@ var errPrefaceTimeout = errors.New("timeout waiting for client preface") // returns errPrefaceTimeout on timeout, or an error if the greeting // is invalid. func (sc *serverConn) readPreface() error { + if sc.sawClientPreface { + return nil + } errc := make(chan error, 1) go func() { // Read the client preface @@ -1915,6 +1952,26 @@ func (sc *serverConn) processHeaders(f *MetaHeadersFrame) error { return nil } +func (sc *serverConn) upgradeRequest(req *http.Request) { + sc.serveG.check() + id := uint32(1) + sc.maxClientStreamID = id + st := sc.newStream(id, 0, stateHalfClosedRemote) + st.reqTrailer = req.Trailer + if st.reqTrailer != nil { + st.trailer = make(http.Header) + } + rw := sc.newResponseWriter(st, req) + + // Disable any read deadline set by the net/http package + // prior to the upgrade. + if sc.hs.ReadTimeout != 0 { + sc.conn.SetReadDeadline(time.Time{}) + } + + go sc.runHandler(rw, req, sc.handler.ServeHTTP) +} + func (st *stream) processTrailerHeaders(f *MetaHeadersFrame) error { sc := st.sc sc.serveG.check() @@ -2145,6 +2202,11 @@ func (sc *serverConn) newWriterAndRequestNoBody(st *stream, rp requestParam) (*r } req = req.WithContext(st.ctx) + rw := sc.newResponseWriter(st, req) + return rw, req, nil +} + +func (sc *serverConn) newResponseWriter(st *stream, req *http.Request) *responseWriter { rws := responseWriterStatePool.Get().(*responseWriterState) bwSave := rws.bw *rws = responseWriterState{} // zero all the fields @@ -2153,10 +2215,7 @@ func (sc *serverConn) newWriterAndRequestNoBody(st *stream, rp requestParam) (*r rws.bw.Reset(chunkWriter{rws}) rws.stream = st rws.req = req - rws.body = body - - rw := &responseWriter{rws: rws} - return rw, req, nil + return &responseWriter{rws: rws} } // Run on its own goroutine. 
@@ -2316,17 +2375,18 @@ type requestBody struct { _ incomparable stream *stream conn *serverConn - closed bool // for use by Close only - sawEOF bool // for use by Read only - pipe *pipe // non-nil if we have a HTTP entity message body - needsContinue bool // need to send a 100-continue + closeOnce sync.Once // for use by Close only + sawEOF bool // for use by Read only + pipe *pipe // non-nil if we have a HTTP entity message body + needsContinue bool // need to send a 100-continue } func (b *requestBody) Close() error { - if b.pipe != nil && !b.closed { - b.pipe.BreakWithError(errClosedBody) - } - b.closed = true + b.closeOnce.Do(func() { + if b.pipe != nil { + b.pipe.BreakWithError(errClosedBody) + } + }) return nil } @@ -2370,7 +2430,6 @@ type responseWriterState struct { // immutable within a request: stream *stream req *http.Request - body *requestBody // to close at end of request, if DATA frames didn't conn *serverConn // TODO: adjust buffer writing sizes based on server config, frame size updates from peer, etc @@ -2546,8 +2605,9 @@ func (rws *responseWriterState) writeChunk(p []byte) (n int, err error) { // prior to the headers being written. If the set of trailers is fixed // or known before the header is written, the normal Go trailers mechanism // is preferred: -// https://golang.org/pkg/net/http/#ResponseWriter -// https://golang.org/pkg/net/http/#example_ResponseWriter_trailers +// +// https://golang.org/pkg/net/http/#ResponseWriter +// https://golang.org/pkg/net/http/#example_ResponseWriter_trailers const TrailerPrefix = "Trailer:" // promoteUndeclaredTrailers permits http.Handlers to set trailers @@ -2643,8 +2703,7 @@ func checkWriteHeaderCode(code int) { // Issue 22880: require valid WriteHeader status codes. // For now we only enforce that it's three digits. // In the future we might block things over 599 (600 and above aren't defined - // at http://httpwg.org/specs/rfc7231.html#status.codes) - // and we might block under 200 (once we have more mature 1xx support). + // at http://httpwg.org/specs/rfc7231.html#status.codes). // But for now any three digits. 
// // We used to send "HTTP/1.1 000 0" on the wire in responses but there's @@ -2665,13 +2724,41 @@ func (w *responseWriter) WriteHeader(code int) { } func (rws *responseWriterState) writeHeader(code int) { - if !rws.wroteHeader { - checkWriteHeaderCode(code) - rws.wroteHeader = true - rws.status = code - if len(rws.handlerHeader) > 0 { - rws.snapHeader = cloneHeader(rws.handlerHeader) + if rws.wroteHeader { + return + } + + checkWriteHeaderCode(code) + + // Handle informational headers + if code >= 100 && code <= 199 { + // Per RFC 8297 we must not clear the current header map + h := rws.handlerHeader + + _, cl := h["Content-Length"] + _, te := h["Transfer-Encoding"] + if cl || te { + h = h.Clone() + h.Del("Content-Length") + h.Del("Transfer-Encoding") + } + + if rws.conn.writeHeaders(rws.stream, &writeResHeaders{ + streamID: rws.stream.id, + httpResCode: code, + h: h, + endStream: rws.handlerDone && !rws.hasTrailers(), + }) != nil { + rws.dirty = true } + + return + } + + rws.wroteHeader = true + rws.status = code + if len(rws.handlerHeader) > 0 { + rws.snapHeader = cloneHeader(rws.handlerHeader) } } diff --git a/vendor/golang.org/x/net/http2/transport.go b/vendor/golang.org/x/net/http2/transport.go index 4f0989763..4ded4dfd5 100644 --- a/vendor/golang.org/x/net/http2/transport.go +++ b/vendor/golang.org/x/net/http2/transport.go @@ -16,7 +16,6 @@ import ( "errors" "fmt" "io" - "io/ioutil" "log" "math" mathrand "math/rand" @@ -501,12 +500,14 @@ func (t *Transport) RoundTripOpt(req *http.Request, opt RoundTripOpt) (*http.Res if req, err = shouldRetryRequest(req, err); err == nil { // After the first retry, do exponential backoff with 10% jitter. if retry == 0 { + t.vlogf("RoundTrip retrying after failure: %v", err) continue } backoff := float64(uint(1) << (uint(retry) - 1)) backoff += backoff * (0.1 * mathrand.Float64()) select { case <-time.After(time.Second * time.Duration(backoff)): + t.vlogf("RoundTrip retrying after failure: %v", err) continue case <-req.Context().Done(): err = req.Context().Err() @@ -732,10 +733,13 @@ func (cc *ClientConn) healthCheck() { // trigger the healthCheck again if there is no frame received. ctx, cancel := context.WithTimeout(context.Background(), pingTimeout) defer cancel() + cc.vlogf("http2: Transport sending health check") err := cc.Ping(ctx) if err != nil { + cc.vlogf("http2: Transport health check failure: %v", err) cc.closeForLostPing() - return + } else { + cc.vlogf("http2: Transport health check success") } } @@ -1765,7 +1769,8 @@ func (cc *ClientConn) encodeHeaders(req *http.Request, addGzipHeader bool, trail } for _, v := range vv { if !httpguts.ValidHeaderFieldValue(v) { - return nil, fmt.Errorf("invalid HTTP header value %q for header %q", v, k) + // Don't include the value in the error, because it may be sensitive. + return nil, fmt.Errorf("invalid HTTP header value for header %q", k) } } } @@ -2898,7 +2903,12 @@ func (t *Transport) logf(format string, args ...interface{}) { log.Printf(format, args...) 
} -var noBody io.ReadCloser = ioutil.NopCloser(bytes.NewReader(nil)) +var noBody io.ReadCloser = noBodyReader{} + +type noBodyReader struct{} + +func (noBodyReader) Close() error { return nil } +func (noBodyReader) Read([]byte) (int, error) { return 0, io.EOF } type missingBody struct{} diff --git a/vendor/golang.org/x/net/http2/writesched_priority.go b/vendor/golang.org/x/net/http2/writesched_priority.go index 2618b2c11..0a242c669 100644 --- a/vendor/golang.org/x/net/http2/writesched_priority.go +++ b/vendor/golang.org/x/net/http2/writesched_priority.go @@ -383,16 +383,15 @@ func (ws *priorityWriteScheduler) AdjustStream(streamID uint32, priority Priorit func (ws *priorityWriteScheduler) Push(wr FrameWriteRequest) { var n *priorityNode - if id := wr.StreamID(); id == 0 { + if wr.isControl() { n = &ws.root } else { + id := wr.StreamID() n = ws.nodes[id] if n == nil { // id is an idle or closed stream. wr should not be a HEADERS or - // DATA frame. However, wr can be a RST_STREAM. In this case, we - // push wr onto the root, rather than creating a new priorityNode, - // since RST_STREAM is tiny and the stream's priority is unknown - // anyway. See issue #17919. + // DATA frame. In other case, we push wr onto the root, rather + // than creating a new priorityNode. if wr.DataSize() > 0 { panic("add DATA on non-open stream") } diff --git a/vendor/golang.org/x/net/idna/trieval.go b/vendor/golang.org/x/net/idna/trieval.go index 7a8cf889b..9c070a44b 100644 --- a/vendor/golang.org/x/net/idna/trieval.go +++ b/vendor/golang.org/x/net/idna/trieval.go @@ -17,23 +17,23 @@ package idna // // The per-rune values have the following format: // -// if mapped { -// if inlinedXOR { -// 15..13 inline XOR marker -// 12..11 unused -// 10..3 inline XOR mask -// } else { -// 15..3 index into xor or mapping table -// } -// } else { -// 15..14 unused -// 13 mayNeedNorm -// 12..11 attributes -// 10..8 joining type -// 7..3 category type -// } -// 2 use xor pattern -// 1..0 mapped category +// if mapped { +// if inlinedXOR { +// 15..13 inline XOR marker +// 12..11 unused +// 10..3 inline XOR mask +// } else { +// 15..3 index into xor or mapping table +// } +// } else { +// 15..14 unused +// 13 mayNeedNorm +// 12..11 attributes +// 10..8 joining type +// 7..3 category type +// } +// 2 use xor pattern +// 1..0 mapped category // // See the definitions below for a more detailed description of the various // bits. 
diff --git a/vendor/golang.org/x/net/internal/socket/zsys_linux_ppc.go b/vendor/golang.org/x/net/internal/socket/zsys_linux_ppc.go index 59b71da57..4c19269be 100644 --- a/vendor/golang.org/x/net/internal/socket/zsys_linux_ppc.go +++ b/vendor/golang.org/x/net/internal/socket/zsys_linux_ppc.go @@ -4,32 +4,32 @@ package socket type iovec struct { - Base *byte - Len uint32 + Base *byte + Len uint32 } type msghdr struct { - Name *byte - Namelen uint32 - Iov *iovec - Iovlen uint32 - Control *byte - Controllen uint32 - Flags int32 + Name *byte + Namelen uint32 + Iov *iovec + Iovlen uint32 + Control *byte + Controllen uint32 + Flags int32 } type mmsghdr struct { - Hdr msghdr - Len uint32 + Hdr msghdr + Len uint32 } type cmsghdr struct { - Len uint32 - Level int32 - Type int32 + Len uint32 + Level int32 + Type int32 } const ( - sizeofIovec = 0x8 - sizeofMsghdr = 0x1c + sizeofIovec = 0x8 + sizeofMsghdr = 0x1c ) diff --git a/vendor/golang.org/x/net/ipv4/doc.go b/vendor/golang.org/x/net/ipv4/doc.go index 245834979..6fbdc52b9 100644 --- a/vendor/golang.org/x/net/ipv4/doc.go +++ b/vendor/golang.org/x/net/ipv4/doc.go @@ -16,8 +16,7 @@ // 3376. // Source-specific multicast is defined in RFC 4607. // -// -// Unicasting +// # Unicasting // // The options for unicasting are available for net.TCPConn, // net.UDPConn and net.IPConn which are created as network connections @@ -51,8 +50,7 @@ // }(c) // } // -// -// Multicasting +// # Multicasting // // The options for multicasting are available for net.UDPConn and // net.IPConn which are created as network connections that use the @@ -141,8 +139,7 @@ // } // } // -// -// More multicasting +// # More multicasting // // An application that uses PacketConn or RawConn may join multiple // multicast groups. For example, a UDP listener with port 1024 might @@ -200,8 +197,7 @@ // // error handling // } // -// -// Source-specific multicasting +// # Source-specific multicasting // // An application that uses PacketConn or RawConn on IGMPv3 supported // platform is able to join source-specific multicast groups. diff --git a/vendor/golang.org/x/net/ipv6/doc.go b/vendor/golang.org/x/net/ipv6/doc.go index e0be9d50d..2148b814f 100644 --- a/vendor/golang.org/x/net/ipv6/doc.go +++ b/vendor/golang.org/x/net/ipv6/doc.go @@ -17,8 +17,7 @@ // On Darwin, this package requires OS X Mavericks version 10.9 or // above, or equivalent. // -// -// Unicasting +// # Unicasting // // The options for unicasting are available for net.TCPConn, // net.UDPConn and net.IPConn which are created as network connections @@ -52,8 +51,7 @@ // }(c) // } // -// -// Multicasting +// # Multicasting // // The options for multicasting are available for net.UDPConn and // net.IPConn which are created as network connections that use the @@ -140,8 +138,7 @@ // } // } // -// -// More multicasting +// # More multicasting // // An application that uses PacketConn may join multiple multicast // groups. For example, a UDP listener with port 1024 might join two @@ -199,8 +196,7 @@ // // error handling // } // -// -// Source-specific multicasting +// # Source-specific multicasting // // An application that uses PacketConn on MLDv2 supported platform is // able to join source-specific multicast groups. 
diff --git a/vendor/golang.org/x/net/publicsuffix/list.go b/vendor/golang.org/x/net/publicsuffix/list.go index 200617ea8..e2fddd645 100644 --- a/vendor/golang.org/x/net/publicsuffix/list.go +++ b/vendor/golang.org/x/net/publicsuffix/list.go @@ -33,9 +33,10 @@ // the last two are not (but share the same eTLD+1: "google.com"). // // All of these domains have the same eTLD+1: -// - "www.books.amazon.co.uk" -// - "books.amazon.co.uk" -// - "amazon.co.uk" +// - "www.books.amazon.co.uk" +// - "books.amazon.co.uk" +// - "amazon.co.uk" +// // Specifically, the eTLD+1 is "amazon.co.uk", because the eTLD is "co.uk". // // There is no closed form algorithm to calculate the eTLD of a domain. diff --git a/vendor/golang.org/x/net/publicsuffix/table.go b/vendor/golang.org/x/net/publicsuffix/table.go index fd3c3ca48..a44423976 100644 --- a/vendor/golang.org/x/net/publicsuffix/table.go +++ b/vendor/golang.org/x/net/publicsuffix/table.go @@ -540,6 +540,7 @@ const text = "9guacuiababia-goracleaningroks-theatree164-balsfjordd-dnshome-we" // An I denotes an ICANN domain. // // The layout within the uint32, from MSB to LSB, is: +// // [ 0 bits] unused // [10 bits] children index // [ 1 bits] ICANN bit @@ -9898,6 +9899,7 @@ var nodes = [...]uint32{ // will be in the range [0, 6), depending on the wildcard bit and node type. // // The layout within the uint32, from MSB to LSB, is: +// // [ 1 bits] unused // [ 1 bits] wildcard bit // [ 2 bits] node type diff --git a/vendor/golang.org/x/oauth2/authhandler/authhandler.go b/vendor/golang.org/x/oauth2/authhandler/authhandler.go index 69967cf87..9bc6cd7bc 100644 --- a/vendor/golang.org/x/oauth2/authhandler/authhandler.go +++ b/vendor/golang.org/x/oauth2/authhandler/authhandler.go @@ -13,11 +13,36 @@ import ( "golang.org/x/oauth2" ) +const ( + // Parameter keys for AuthCodeURL method to support PKCE. + codeChallengeKey = "code_challenge" + codeChallengeMethodKey = "code_challenge_method" + + // Parameter key for Exchange method to support PKCE. + codeVerifierKey = "code_verifier" +) + +// PKCEParams holds parameters to support PKCE. +type PKCEParams struct { + Challenge string // The unpadded, base64-url-encoded string of the encrypted code verifier. + ChallengeMethod string // The encryption method (ex. S256). + Verifier string // The original, non-encrypted secret. +} + // AuthorizationHandler is a 3-legged-OAuth helper that prompts // the user for OAuth consent at the specified auth code URL // and returns an auth code and state upon approval. type AuthorizationHandler func(authCodeURL string) (code string, state string, err error) +// TokenSourceWithPKCE is an enhanced version of TokenSource with PKCE support. +// +// The pkce parameter supports PKCE flow, which uses code challenge and code verifier +// to prevent CSRF attacks. A unique code challenge and code verifier should be generated +// by the caller at runtime. See https://www.oauth.com/oauth2-servers/pkce/ for more info. +func TokenSourceWithPKCE(ctx context.Context, config *oauth2.Config, state string, authHandler AuthorizationHandler, pkce *PKCEParams) oauth2.TokenSource { + return oauth2.ReuseTokenSource(nil, authHandlerSource{config: config, ctx: ctx, authHandler: authHandler, state: state, pkce: pkce}) +} + // TokenSource returns an oauth2.TokenSource that fetches access tokens // using 3-legged-OAuth flow. // @@ -33,7 +58,7 @@ type AuthorizationHandler func(authCodeURL string) (code string, state string, e // and response before exchanging the auth code for OAuth token to prevent CSRF // attacks. 
func TokenSource(ctx context.Context, config *oauth2.Config, state string, authHandler AuthorizationHandler) oauth2.TokenSource { - return oauth2.ReuseTokenSource(nil, authHandlerSource{config: config, ctx: ctx, authHandler: authHandler, state: state}) + return TokenSourceWithPKCE(ctx, config, state, authHandler, nil) } type authHandlerSource struct { @@ -41,10 +66,17 @@ type authHandlerSource struct { config *oauth2.Config authHandler AuthorizationHandler state string + pkce *PKCEParams } func (source authHandlerSource) Token() (*oauth2.Token, error) { - url := source.config.AuthCodeURL(source.state) + // Step 1: Obtain auth code. + var authCodeUrlOptions []oauth2.AuthCodeOption + if source.pkce != nil && source.pkce.Challenge != "" && source.pkce.ChallengeMethod != "" { + authCodeUrlOptions = []oauth2.AuthCodeOption{oauth2.SetAuthURLParam(codeChallengeKey, source.pkce.Challenge), + oauth2.SetAuthURLParam(codeChallengeMethodKey, source.pkce.ChallengeMethod)} + } + url := source.config.AuthCodeURL(source.state, authCodeUrlOptions...) code, state, err := source.authHandler(url) if err != nil { return nil, err @@ -52,5 +84,11 @@ func (source authHandlerSource) Token() (*oauth2.Token, error) { if state != source.state { return nil, errors.New("state mismatch in 3-legged-OAuth flow") } - return source.config.Exchange(source.ctx, code) + + // Step 2: Exchange auth code for access token. + var exchangeOptions []oauth2.AuthCodeOption + if source.pkce != nil && source.pkce.Verifier != "" { + exchangeOptions = []oauth2.AuthCodeOption{oauth2.SetAuthURLParam(codeVerifierKey, source.pkce.Verifier)} + } + return source.config.Exchange(source.ctx, code, exchangeOptions...) } diff --git a/vendor/golang.org/x/oauth2/google/default.go b/vendor/golang.org/x/oauth2/google/default.go index 880dd7b59..7ed02cd41 100644 --- a/vendor/golang.org/x/oauth2/google/default.go +++ b/vendor/golang.org/x/oauth2/google/default.go @@ -54,11 +54,14 @@ type CredentialsParams struct { // Optional. Subject string - // AuthHandler is the AuthorizationHandler used for 3-legged OAuth flow. Optional. + // AuthHandler is the AuthorizationHandler used for 3-legged OAuth flow. Required for 3LO flow. AuthHandler authhandler.AuthorizationHandler - // State is a unique string used with AuthHandler. Optional. + // State is a unique string used with AuthHandler. Required for 3LO flow. State string + + // PKCE is used to support PKCE flow. Optional for 3LO flow. + PKCE *authhandler.PKCEParams } func (params CredentialsParams) deepCopy() CredentialsParams { @@ -94,20 +97,20 @@ func DefaultTokenSource(ctx context.Context, scope ...string) (oauth2.TokenSourc // It looks for credentials in the following places, // preferring the first location found: // -// 1. A JSON file whose path is specified by the -// GOOGLE_APPLICATION_CREDENTIALS environment variable. -// For workload identity federation, refer to -// https://cloud.google.com/iam/docs/how-to#using-workload-identity-federation on -// how to generate the JSON configuration file for on-prem/non-Google cloud -// platforms. -// 2. A JSON file in a location known to the gcloud command-line tool. -// On Windows, this is %APPDATA%/gcloud/application_default_credentials.json. -// On other systems, $HOME/.config/gcloud/application_default_credentials.json. -// 3. On Google App Engine standard first generation runtimes (<= Go 1.9) it uses -// the appengine.AccessToken function. -// 4. 
On Google Compute Engine, Google App Engine standard second generation runtimes -// (>= Go 1.11), and Google App Engine flexible environment, it fetches -// credentials from the metadata server. +// 1. A JSON file whose path is specified by the +// GOOGLE_APPLICATION_CREDENTIALS environment variable. +// For workload identity federation, refer to +// https://cloud.google.com/iam/docs/how-to#using-workload-identity-federation on +// how to generate the JSON configuration file for on-prem/non-Google cloud +// platforms. +// 2. A JSON file in a location known to the gcloud command-line tool. +// On Windows, this is %APPDATA%/gcloud/application_default_credentials.json. +// On other systems, $HOME/.config/gcloud/application_default_credentials.json. +// 3. On Google App Engine standard first generation runtimes (<= Go 1.9) it uses +// the appengine.AccessToken function. +// 4. On Google Compute Engine, Google App Engine standard second generation runtimes +// (>= Go 1.11), and Google App Engine flexible environment, it fetches +// credentials from the metadata server. func FindDefaultCredentialsWithParams(ctx context.Context, params CredentialsParams) (*Credentials, error) { // Make defensive copy of the slices in params. params = params.deepCopy() @@ -176,7 +179,7 @@ func CredentialsFromJSONWithParams(ctx context.Context, jsonData []byte, params if config != nil { return &Credentials{ ProjectID: "", - TokenSource: authhandler.TokenSource(ctx, config, params.State, params.AuthHandler), + TokenSource: authhandler.TokenSourceWithPKCE(ctx, config, params.State, params.AuthHandler, params.PKCE), JSON: jsonData, }, nil } @@ -190,6 +193,7 @@ func CredentialsFromJSONWithParams(ctx context.Context, jsonData []byte, params if err != nil { return nil, err } + ts = newErrWrappingTokenSource(ts) return &DefaultCredentials{ ProjectID: f.ProjectID, TokenSource: ts, diff --git a/vendor/golang.org/x/oauth2/google/doc.go b/vendor/golang.org/x/oauth2/google/doc.go index 8e6a57ce9..dddf65144 100644 --- a/vendor/golang.org/x/oauth2/google/doc.go +++ b/vendor/golang.org/x/oauth2/google/doc.go @@ -15,14 +15,14 @@ // For more information on using workload identity federation, refer to // https://cloud.google.com/iam/docs/how-to#using-workload-identity-federation. // -// OAuth2 Configs +// # OAuth2 Configs // // Two functions in this package return golang.org/x/oauth2.Config values from Google credential // data. Google supports two JSON formats for OAuth2 credentials: one is handled by ConfigFromJSON, // the other by JWTConfigFromJSON. The returned Config can be used to obtain a TokenSource or // create an http.Client. 
// -// Workload Identity Federation +// # Workload Identity Federation // // Using workload identity federation, your application can access Google Cloud // resources from Amazon Web Services (AWS), Microsoft Azure or any identity @@ -36,9 +36,9 @@ // Follow the detailed instructions on how to configure Workload Identity Federation // in various platforms: // -// Amazon Web Services (AWS): https://cloud.google.com/iam/docs/access-resources-aws -// Microsoft Azure: https://cloud.google.com/iam/docs/access-resources-azure -// OIDC identity provider: https://cloud.google.com/iam/docs/access-resources-oidc +// Amazon Web Services (AWS): https://cloud.google.com/iam/docs/access-resources-aws +// Microsoft Azure: https://cloud.google.com/iam/docs/access-resources-azure +// OIDC identity provider: https://cloud.google.com/iam/docs/access-resources-oidc // // For OIDC providers, the library can retrieve OIDC tokens either from a // local file location (file-sourced credentials) or from a local server @@ -51,8 +51,7 @@ // return the OIDC token. The response can be in plain text or JSON. // Additional required request headers can also be specified. // -// -// Credentials +// # Credentials // // The Credentials type represents Google credentials, including Application Default // Credentials. diff --git a/vendor/golang.org/x/oauth2/google/error.go b/vendor/golang.org/x/oauth2/google/error.go new file mode 100644 index 000000000..d84dd0047 --- /dev/null +++ b/vendor/golang.org/x/oauth2/google/error.go @@ -0,0 +1,64 @@ +// Copyright 2022 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package google + +import ( + "errors" + + "golang.org/x/oauth2" +) + +// AuthenticationError indicates there was an error in the authentication flow. +// +// Use (*AuthenticationError).Temporary to check if the error can be retried. +type AuthenticationError struct { + err *oauth2.RetrieveError +} + +func newAuthenticationError(err error) error { + re := &oauth2.RetrieveError{} + if !errors.As(err, &re) { + return err + } + return &AuthenticationError{ + err: re, + } +} + +// Temporary indicates that the network error has one of the following status codes and may be retried: 500, 503, 408, or 429. +func (e *AuthenticationError) Temporary() bool { + if e.err.Response == nil { + return false + } + sc := e.err.Response.StatusCode + return sc == 500 || sc == 503 || sc == 408 || sc == 429 +} + +func (e *AuthenticationError) Error() string { + return e.err.Error() +} + +func (e *AuthenticationError) Unwrap() error { + return e.err +} + +type errWrappingTokenSource struct { + src oauth2.TokenSource +} + +func newErrWrappingTokenSource(ts oauth2.TokenSource) oauth2.TokenSource { + return &errWrappingTokenSource{src: ts} +} + +// Token returns the current token if it's still valid, else will +// refresh the current token (using r.Context for HTTP client +// information) and return the new one. 
+func (s *errWrappingTokenSource) Token() (*oauth2.Token, error) { + t, err := s.src.Token() + if err != nil { + return nil, newAuthenticationError(err) + } + return t, nil +} diff --git a/vendor/golang.org/x/oauth2/google/google.go b/vendor/golang.org/x/oauth2/google/google.go index ccc23ee0a..ceddd5dde 100644 --- a/vendor/golang.org/x/oauth2/google/google.go +++ b/vendor/golang.org/x/oauth2/google/google.go @@ -139,6 +139,7 @@ func (f *credentialsFile) jwtConfig(scopes []string, subject string) *jwt.Config Scopes: scopes, TokenURL: f.TokenURL, Subject: subject, // This is the user email to impersonate + Audience: f.Audience, } if cfg.TokenURL == "" { cfg.TokenURL = JWTTokenURL diff --git a/vendor/golang.org/x/oauth2/google/internal/externalaccount/aws.go b/vendor/golang.org/x/oauth2/google/internal/externalaccount/aws.go index a5a5423c6..e917195d5 100644 --- a/vendor/golang.org/x/oauth2/google/internal/externalaccount/aws.go +++ b/vendor/golang.org/x/oauth2/google/internal/externalaccount/aws.go @@ -52,6 +52,13 @@ const ( // The AWS authorization header name for the security session token if available. awsSecurityTokenHeader = "x-amz-security-token" + // The name of the header containing the session token for metadata endpoint calls + awsIMDSv2SessionTokenHeader = "X-aws-ec2-metadata-token" + + awsIMDSv2SessionTtlHeader = "X-aws-ec2-metadata-token-ttl-seconds" + + awsIMDSv2SessionTtl = "300" + // The AWS authorization header name for the auto-generated date. awsDateHeader = "x-amz-date" @@ -241,6 +248,7 @@ type awsCredentialSource struct { RegionURL string RegionalCredVerificationURL string CredVerificationURL string + IMDSv2SessionTokenURL string TargetResource string requestSigner *awsRequestSigner region string @@ -268,12 +276,22 @@ func (cs awsCredentialSource) doRequest(req *http.Request) (*http.Response, erro func (cs awsCredentialSource) subjectToken() (string, error) { if cs.requestSigner == nil { - awsSecurityCredentials, err := cs.getSecurityCredentials() + awsSessionToken, err := cs.getAWSSessionToken() + if err != nil { + return "", err + } + + headers := make(map[string]string) + if awsSessionToken != "" { + headers[awsIMDSv2SessionTokenHeader] = awsSessionToken + } + + awsSecurityCredentials, err := cs.getSecurityCredentials(headers) if err != nil { return "", err } - if cs.region, err = cs.getRegion(); err != nil { + if cs.region, err = cs.getRegion(headers); err != nil { return "", err } @@ -340,7 +358,37 @@ func (cs awsCredentialSource) subjectToken() (string, error) { return url.QueryEscape(string(result)), nil } -func (cs *awsCredentialSource) getRegion() (string, error) { +func (cs *awsCredentialSource) getAWSSessionToken() (string, error) { + if cs.IMDSv2SessionTokenURL == "" { + return "", nil + } + + req, err := http.NewRequest("PUT", cs.IMDSv2SessionTokenURL, nil) + if err != nil { + return "", err + } + + req.Header.Add(awsIMDSv2SessionTtlHeader, awsIMDSv2SessionTtl) + + resp, err := cs.doRequest(req) + if err != nil { + return "", err + } + defer resp.Body.Close() + + respBody, err := ioutil.ReadAll(io.LimitReader(resp.Body, 1<<20)) + if err != nil { + return "", err + } + + if resp.StatusCode != 200 { + return "", fmt.Errorf("oauth2/google: unable to retrieve AWS session token - %s", string(respBody)) + } + + return string(respBody), nil +} + +func (cs *awsCredentialSource) getRegion(headers map[string]string) (string, error) { if envAwsRegion := getenv("AWS_REGION"); envAwsRegion != "" { return envAwsRegion, nil } @@ -357,6 +405,10 @@ func (cs 
*awsCredentialSource) getRegion() (string, error) { return "", err } + for name, value := range headers { + req.Header.Add(name, value) + } + resp, err := cs.doRequest(req) if err != nil { return "", err @@ -381,7 +433,7 @@ func (cs *awsCredentialSource) getRegion() (string, error) { return string(respBody[:respBodyEnd]), nil } -func (cs *awsCredentialSource) getSecurityCredentials() (result awsSecurityCredentials, err error) { +func (cs *awsCredentialSource) getSecurityCredentials(headers map[string]string) (result awsSecurityCredentials, err error) { if accessKeyID := getenv("AWS_ACCESS_KEY_ID"); accessKeyID != "" { if secretAccessKey := getenv("AWS_SECRET_ACCESS_KEY"); secretAccessKey != "" { return awsSecurityCredentials{ @@ -392,12 +444,12 @@ func (cs *awsCredentialSource) getSecurityCredentials() (result awsSecurityCrede } } - roleName, err := cs.getMetadataRoleName() + roleName, err := cs.getMetadataRoleName(headers) if err != nil { return } - credentials, err := cs.getMetadataSecurityCredentials(roleName) + credentials, err := cs.getMetadataSecurityCredentials(roleName, headers) if err != nil { return } @@ -413,7 +465,7 @@ func (cs *awsCredentialSource) getSecurityCredentials() (result awsSecurityCrede return credentials, nil } -func (cs *awsCredentialSource) getMetadataSecurityCredentials(roleName string) (awsSecurityCredentials, error) { +func (cs *awsCredentialSource) getMetadataSecurityCredentials(roleName string, headers map[string]string) (awsSecurityCredentials, error) { var result awsSecurityCredentials req, err := http.NewRequest("GET", fmt.Sprintf("%s/%s", cs.CredVerificationURL, roleName), nil) @@ -422,6 +474,10 @@ func (cs *awsCredentialSource) getMetadataSecurityCredentials(roleName string) ( } req.Header.Add("Content-Type", "application/json") + for name, value := range headers { + req.Header.Add(name, value) + } + resp, err := cs.doRequest(req) if err != nil { return result, err @@ -441,7 +497,7 @@ func (cs *awsCredentialSource) getMetadataSecurityCredentials(roleName string) ( return result, err } -func (cs *awsCredentialSource) getMetadataRoleName() (string, error) { +func (cs *awsCredentialSource) getMetadataRoleName(headers map[string]string) (string, error) { if cs.CredVerificationURL == "" { return "", errors.New("oauth2/google: unable to determine the AWS metadata server security credentials endpoint") } @@ -451,6 +507,10 @@ func (cs *awsCredentialSource) getMetadataRoleName() (string, error) { return "", err } + for name, value := range headers { + req.Header.Add(name, value) + } + resp, err := cs.doRequest(req) if err != nil { return "", err diff --git a/vendor/golang.org/x/oauth2/google/internal/externalaccount/basecredentials.go b/vendor/golang.org/x/oauth2/google/internal/externalaccount/basecredentials.go index bc3ce5317..83ce9c245 100644 --- a/vendor/golang.org/x/oauth2/google/internal/externalaccount/basecredentials.go +++ b/vendor/golang.org/x/oauth2/google/internal/externalaccount/basecredentials.go @@ -175,6 +175,7 @@ type CredentialSource struct { RegionURL string `json:"region_url"` RegionalCredVerificationURL string `json:"regional_cred_verification_url"` CredVerificationURL string `json:"cred_verification_url"` + IMDSv2SessionTokenURL string `json:"imdsv2_session_token_url"` Format format `json:"format"` } @@ -185,14 +186,20 @@ func (c *Config) parse(ctx context.Context) (baseCredentialSource, error) { if awsVersion != 1 { return nil, fmt.Errorf("oauth2/google: aws version '%d' is not supported in the current build", awsVersion) } - return 
awsCredentialSource{ + + awsCredSource := awsCredentialSource{ EnvironmentID: c.CredentialSource.EnvironmentID, RegionURL: c.CredentialSource.RegionURL, RegionalCredVerificationURL: c.CredentialSource.RegionalCredVerificationURL, CredVerificationURL: c.CredentialSource.URL, TargetResource: c.Audience, ctx: ctx, - }, nil + } + if c.CredentialSource.IMDSv2SessionTokenURL != "" { + awsCredSource.IMDSv2SessionTokenURL = c.CredentialSource.IMDSv2SessionTokenURL + } + + return awsCredSource, nil } } else if c.CredentialSource.File != "" { return fileCredentialSource{File: c.CredentialSource.File, Format: c.CredentialSource.Format}, nil diff --git a/vendor/golang.org/x/oauth2/google/jwt.go b/vendor/golang.org/x/oauth2/google/jwt.go index 67d97b990..e89e6ae17 100644 --- a/vendor/golang.org/x/oauth2/google/jwt.go +++ b/vendor/golang.org/x/oauth2/google/jwt.go @@ -66,7 +66,8 @@ func newJWTSource(jsonKey []byte, audience string, scopes []string) (oauth2.Toke if err != nil { return nil, err } - return oauth2.ReuseTokenSource(tok, ts), nil + rts := newErrWrappingTokenSource(oauth2.ReuseTokenSource(tok, ts)) + return rts, nil } type jwtAccessTokenSource struct { diff --git a/vendor/golang.org/x/sys/execabs/execabs.go b/vendor/golang.org/x/sys/execabs/execabs.go index 78192498d..b981cfbb4 100644 --- a/vendor/golang.org/x/sys/execabs/execabs.go +++ b/vendor/golang.org/x/sys/execabs/execabs.go @@ -53,7 +53,7 @@ func relError(file, path string) error { // LookPath instead returns an error. func LookPath(file string) (string, error) { path, err := exec.LookPath(file) - if err != nil { + if err != nil && !isGo119ErrDot(err) { return "", err } if filepath.Base(file) == file && !filepath.IsAbs(path) { diff --git a/vendor/golang.org/x/sys/execabs/execabs_go118.go b/vendor/golang.org/x/sys/execabs/execabs_go118.go new file mode 100644 index 000000000..6ab5f5089 --- /dev/null +++ b/vendor/golang.org/x/sys/execabs/execabs_go118.go @@ -0,0 +1,12 @@ +// Copyright 2022 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build !go1.19 +// +build !go1.19 + +package execabs + +func isGo119ErrDot(err error) bool { + return false +} diff --git a/vendor/golang.org/x/sys/execabs/execabs_go119.go b/vendor/golang.org/x/sys/execabs/execabs_go119.go new file mode 100644 index 000000000..1e7a9ada0 --- /dev/null +++ b/vendor/golang.org/x/sys/execabs/execabs_go119.go @@ -0,0 +1,15 @@ +// Copyright 2022 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build go1.19 +// +build go1.19 + +package execabs + +import "strings" + +func isGo119ErrDot(err error) bool { + // TODO: return errors.Is(err, exec.ErrDot) + return strings.Contains(err.Error(), "current directory") +} diff --git a/vendor/golang.org/x/sys/plan9/syscall.go b/vendor/golang.org/x/sys/plan9/syscall.go index 602473cba..a25223b8f 100644 --- a/vendor/golang.org/x/sys/plan9/syscall.go +++ b/vendor/golang.org/x/sys/plan9/syscall.go @@ -113,5 +113,6 @@ func (tv *Timeval) Nano() int64 { // use is a no-op, but the compiler cannot see that it is. // Calling use(p) ensures that p is kept live until that point. 
+// //go:noescape func use(p unsafe.Pointer) diff --git a/vendor/golang.org/x/sys/plan9/syscall_plan9.go b/vendor/golang.org/x/sys/plan9/syscall_plan9.go index 723b1f400..d079d8116 100644 --- a/vendor/golang.org/x/sys/plan9/syscall_plan9.go +++ b/vendor/golang.org/x/sys/plan9/syscall_plan9.go @@ -115,6 +115,7 @@ func Write(fd int, p []byte) (n int, err error) { var ioSync int64 //sys fd2path(fd int, buf []byte) (err error) + func Fd2path(fd int) (path string, err error) { var buf [512]byte @@ -126,6 +127,7 @@ func Fd2path(fd int) (path string, err error) { } //sys pipe(p *[2]int32) (err error) + func Pipe(p []int) (err error) { if len(p) != 2 { return syscall.ErrorString("bad arg in system call") @@ -180,6 +182,7 @@ func (w Waitmsg) ExitStatus() int { } //sys await(s []byte) (n int, err error) + func Await(w *Waitmsg) (err error) { var buf [512]byte var f [5][]byte @@ -301,42 +304,49 @@ func Getgroups() (gids []int, err error) { } //sys open(path string, mode int) (fd int, err error) + func Open(path string, mode int) (fd int, err error) { fixwd() return open(path, mode) } //sys create(path string, mode int, perm uint32) (fd int, err error) + func Create(path string, mode int, perm uint32) (fd int, err error) { fixwd() return create(path, mode, perm) } //sys remove(path string) (err error) + func Remove(path string) error { fixwd() return remove(path) } //sys stat(path string, edir []byte) (n int, err error) + func Stat(path string, edir []byte) (n int, err error) { fixwd() return stat(path, edir) } //sys bind(name string, old string, flag int) (err error) + func Bind(name string, old string, flag int) (err error) { fixwd() return bind(name, old, flag) } //sys mount(fd int, afd int, old string, flag int, aname string) (err error) + func Mount(fd int, afd int, old string, flag int, aname string) (err error) { fixwd() return mount(fd, afd, old, flag, aname) } //sys wstat(path string, edir []byte) (err error) + func Wstat(path string, edir []byte) (err error) { fixwd() return wstat(path, edir) diff --git a/vendor/golang.org/x/sys/unix/asm_linux_loong64.s b/vendor/golang.org/x/sys/unix/asm_linux_loong64.s index 6abd48eef..565357288 100644 --- a/vendor/golang.org/x/sys/unix/asm_linux_loong64.s +++ b/vendor/golang.org/x/sys/unix/asm_linux_loong64.s @@ -30,7 +30,7 @@ TEXT ·SyscallNoError(SB),NOSPLIT,$0-48 MOVV trap+0(FP), R11 // syscall entry SYSCALL MOVV R4, r1+32(FP) - MOVV R5, r2+40(FP) + MOVV R0, r2+40(FP) // r2 is not used. Always set to 0 JAL runtime·exitsyscall(SB) RET @@ -50,5 +50,5 @@ TEXT ·RawSyscallNoError(SB),NOSPLIT,$0-48 MOVV trap+0(FP), R11 // syscall entry SYSCALL MOVV R4, r1+32(FP) - MOVV R5, r2+40(FP) + MOVV R0, r2+40(FP) // r2 is not used. Always set to 0 RET diff --git a/vendor/golang.org/x/sys/unix/endian_little.go b/vendor/golang.org/x/sys/unix/endian_little.go index 4362f47e2..b0f2bc4ae 100644 --- a/vendor/golang.org/x/sys/unix/endian_little.go +++ b/vendor/golang.org/x/sys/unix/endian_little.go @@ -2,8 +2,8 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. 
// -//go:build 386 || amd64 || amd64p32 || alpha || arm || arm64 || mipsle || mips64le || mips64p32le || nios2 || ppc64le || riscv || riscv64 || sh -// +build 386 amd64 amd64p32 alpha arm arm64 mipsle mips64le mips64p32le nios2 ppc64le riscv riscv64 sh +//go:build 386 || amd64 || amd64p32 || alpha || arm || arm64 || loong64 || mipsle || mips64le || mips64p32le || nios2 || ppc64le || riscv || riscv64 || sh +// +build 386 amd64 amd64p32 alpha arm arm64 loong64 mipsle mips64le mips64p32le nios2 ppc64le riscv riscv64 sh package unix diff --git a/vendor/golang.org/x/sys/unix/ifreq_linux.go b/vendor/golang.org/x/sys/unix/ifreq_linux.go index 934af313c..15721a510 100644 --- a/vendor/golang.org/x/sys/unix/ifreq_linux.go +++ b/vendor/golang.org/x/sys/unix/ifreq_linux.go @@ -8,7 +8,6 @@ package unix import ( - "bytes" "unsafe" ) @@ -45,13 +44,7 @@ func NewIfreq(name string) (*Ifreq, error) { // Name returns the interface name associated with the Ifreq. func (ifr *Ifreq) Name() string { - // BytePtrToString requires a NULL terminator or the program may crash. If - // one is not present, just return the empty string. - if !bytes.Contains(ifr.raw.Ifrn[:], []byte{0x00}) { - return "" - } - - return BytePtrToString(&ifr.raw.Ifrn[0]) + return ByteSliceToString(ifr.raw.Ifrn[:]) } // According to netdevice(7), only AF_INET addresses are returned for numerous diff --git a/vendor/golang.org/x/sys/unix/syscall_aix.go b/vendor/golang.org/x/sys/unix/syscall_aix.go index f2a114fc2..ac579c60f 100644 --- a/vendor/golang.org/x/sys/unix/syscall_aix.go +++ b/vendor/golang.org/x/sys/unix/syscall_aix.go @@ -37,6 +37,7 @@ func Creat(path string, mode uint32) (fd int, err error) { } //sys utimes(path string, times *[2]Timeval) (err error) + func Utimes(path string, tv []Timeval) error { if len(tv) != 2 { return EINVAL @@ -45,6 +46,7 @@ func Utimes(path string, tv []Timeval) error { } //sys utimensat(dirfd int, path string, times *[2]Timespec, flag int) (err error) + func UtimesNano(path string, ts []Timespec) error { if len(ts) != 2 { return EINVAL @@ -215,12 +217,12 @@ func Accept(fd int) (nfd int, sa Sockaddr, err error) { return } -func recvmsgRaw(fd int, p, oob []byte, flags int, rsa *RawSockaddrAny) (n, oobn int, recvflags int, err error) { +func recvmsgRaw(fd int, iov []Iovec, oob []byte, flags int, rsa *RawSockaddrAny) (n, oobn int, recvflags int, err error) { // Recvmsg not implemented on AIX return -1, -1, -1, ENOSYS } -func sendmsgN(fd int, p, oob []byte, ptr unsafe.Pointer, salen _Socklen, flags int) (n int, err error) { +func sendmsgN(fd int, iov []Iovec, oob []byte, ptr unsafe.Pointer, salen _Socklen, flags int) (n int, err error) { // SendmsgN not implemented on AIX return -1, ENOSYS } @@ -300,11 +302,13 @@ func direntNamlen(buf []byte) (uint64, bool) { } //sys getdirent(fd int, buf []byte) (n int, err error) + func Getdents(fd int, buf []byte) (n int, err error) { return getdirent(fd, buf) } //sys wait4(pid Pid_t, status *_C_int, options int, rusage *Rusage) (wpid Pid_t, err error) + func Wait4(pid int, wstatus *WaitStatus, options int, rusage *Rusage) (wpid int, err error) { var status _C_int var r Pid_t @@ -372,6 +376,7 @@ func (w WaitStatus) TrapCause() int { return -1 } //sys fcntl(fd int, cmd int, arg int) (val int, err error) //sys fsyncRange(fd int, how int, start int64, length int64) (err error) = fsync_range + func Fsync(fd int) error { return fsyncRange(fd, O_SYNC, 0, 0) } @@ -536,6 +541,7 @@ func Poll(fds []PollFd, timeout int) (n int, err error) { //sys Getsystemcfg(label int) (n uint64) //sys 
umount(target string) (err error) + func Unmount(target string, flags int) (err error) { if flags != 0 { // AIX doesn't have any flags for umount. diff --git a/vendor/golang.org/x/sys/unix/syscall_bsd.go b/vendor/golang.org/x/sys/unix/syscall_bsd.go index 9c87c5f07..c437fc5d7 100644 --- a/vendor/golang.org/x/sys/unix/syscall_bsd.go +++ b/vendor/golang.org/x/sys/unix/syscall_bsd.go @@ -325,27 +325,26 @@ func GetsockoptString(fd, level, opt int) (string, error) { //sys sendto(s int, buf []byte, flags int, to unsafe.Pointer, addrlen _Socklen) (err error) //sys recvmsg(s int, msg *Msghdr, flags int) (n int, err error) -func recvmsgRaw(fd int, p, oob []byte, flags int, rsa *RawSockaddrAny) (n, oobn int, recvflags int, err error) { +func recvmsgRaw(fd int, iov []Iovec, oob []byte, flags int, rsa *RawSockaddrAny) (n, oobn int, recvflags int, err error) { var msg Msghdr msg.Name = (*byte)(unsafe.Pointer(rsa)) msg.Namelen = uint32(SizeofSockaddrAny) - var iov Iovec - if len(p) > 0 { - iov.Base = (*byte)(unsafe.Pointer(&p[0])) - iov.SetLen(len(p)) - } var dummy byte if len(oob) > 0 { // receive at least one normal byte - if len(p) == 0 { - iov.Base = &dummy - iov.SetLen(1) + if emptyIovecs(iov) { + var iova [1]Iovec + iova[0].Base = &dummy + iova[0].SetLen(1) + iov = iova[:] } msg.Control = (*byte)(unsafe.Pointer(&oob[0])) msg.SetControllen(len(oob)) } - msg.Iov = &iov - msg.Iovlen = 1 + if len(iov) > 0 { + msg.Iov = &iov[0] + msg.SetIovlen(len(iov)) + } if n, err = recvmsg(fd, &msg, flags); err != nil { return } @@ -356,31 +355,32 @@ func recvmsgRaw(fd int, p, oob []byte, flags int, rsa *RawSockaddrAny) (n, oobn //sys sendmsg(s int, msg *Msghdr, flags int) (n int, err error) -func sendmsgN(fd int, p, oob []byte, ptr unsafe.Pointer, salen _Socklen, flags int) (n int, err error) { +func sendmsgN(fd int, iov []Iovec, oob []byte, ptr unsafe.Pointer, salen _Socklen, flags int) (n int, err error) { var msg Msghdr msg.Name = (*byte)(unsafe.Pointer(ptr)) msg.Namelen = uint32(salen) - var iov Iovec - if len(p) > 0 { - iov.Base = (*byte)(unsafe.Pointer(&p[0])) - iov.SetLen(len(p)) - } var dummy byte + var empty bool if len(oob) > 0 { // send at least one normal byte - if len(p) == 0 { - iov.Base = &dummy - iov.SetLen(1) + empty := emptyIovecs(iov) + if empty { + var iova [1]Iovec + iova[0].Base = &dummy + iova[0].SetLen(1) + iov = iova[:] } msg.Control = (*byte)(unsafe.Pointer(&oob[0])) msg.SetControllen(len(oob)) } - msg.Iov = &iov - msg.Iovlen = 1 + if len(iov) > 0 { + msg.Iov = &iov[0] + msg.SetIovlen(len(iov)) + } if n, err = sendmsg(fd, &msg, flags); err != nil { return 0, err } - if len(oob) > 0 && len(p) == 0 { + if len(oob) > 0 && empty { n = 0 } return n, nil diff --git a/vendor/golang.org/x/sys/unix/syscall_darwin.go b/vendor/golang.org/x/sys/unix/syscall_darwin.go index 09a25c653..4f87f16ea 100644 --- a/vendor/golang.org/x/sys/unix/syscall_darwin.go +++ b/vendor/golang.org/x/sys/unix/syscall_darwin.go @@ -393,6 +393,13 @@ func GetsockoptXucred(fd, level, opt int) (*Xucred, error) { return x, err } +func GetsockoptTCPConnectionInfo(fd, level, opt int) (*TCPConnectionInfo, error) { + var value TCPConnectionInfo + vallen := _Socklen(SizeofTCPConnectionInfo) + err := getsockopt(fd, level, opt, unsafe.Pointer(&value), &vallen) + return &value, err +} + func SysctlKinfoProc(name string, args ...int) (*KinfoProc, error) { mib, err := sysctlmib(name, args...) 
if err != nil { @@ -504,6 +511,7 @@ func SysctlKinfoProcSlice(name string, args ...int) ([]KinfoProc, error) { //sys Mkdirat(dirfd int, path string, mode uint32) (err error) //sys Mkfifo(path string, mode uint32) (err error) //sys Mknod(path string, mode uint32, dev int) (err error) +//sys Mount(fsType string, dir string, flags int, data unsafe.Pointer) (err error) //sys Open(path string, mode int, perm uint32) (fd int, err error) //sys Openat(dirfd int, path string, mode int, perm uint32) (fd int, err error) //sys Pathconf(path string, name int) (val int, err error) @@ -572,7 +580,6 @@ func SysctlKinfoProcSlice(name string, args ...int) ([]KinfoProc, error) { // Nfssvc // Getfh // Quotactl -// Mount // Csops // Waitid // Add_profil diff --git a/vendor/golang.org/x/sys/unix/syscall_dragonfly.go b/vendor/golang.org/x/sys/unix/syscall_dragonfly.go index c61e27498..61c0d0de1 100644 --- a/vendor/golang.org/x/sys/unix/syscall_dragonfly.go +++ b/vendor/golang.org/x/sys/unix/syscall_dragonfly.go @@ -125,11 +125,13 @@ func Pipe2(p []int, flags int) (err error) { } //sys extpread(fd int, p []byte, flags int, offset int64) (n int, err error) + func pread(fd int, p []byte, offset int64) (n int, err error) { return extpread(fd, p, 0, offset) } //sys extpwrite(fd int, p []byte, flags int, offset int64) (n int, err error) + func pwrite(fd int, p []byte, offset int64) (n int, err error) { return extpwrite(fd, p, 0, offset) } diff --git a/vendor/golang.org/x/sys/unix/syscall_illumos.go b/vendor/golang.org/x/sys/unix/syscall_illumos.go index 8d5f294c4..e48244a9c 100644 --- a/vendor/golang.org/x/sys/unix/syscall_illumos.go +++ b/vendor/golang.org/x/sys/unix/syscall_illumos.go @@ -20,10 +20,9 @@ func bytes2iovec(bs [][]byte) []Iovec { for i, b := range bs { iovecs[i].SetLen(len(b)) if len(b) > 0 { - // somehow Iovec.Base on illumos is (*int8), not (*byte) - iovecs[i].Base = (*int8)(unsafe.Pointer(&b[0])) + iovecs[i].Base = &b[0] } else { - iovecs[i].Base = (*int8)(unsafe.Pointer(&_zero)) + iovecs[i].Base = (*byte)(unsafe.Pointer(&_zero)) } } return iovecs diff --git a/vendor/golang.org/x/sys/unix/syscall_linux.go b/vendor/golang.org/x/sys/unix/syscall_linux.go index d251dafae..5e4a94f73 100644 --- a/vendor/golang.org/x/sys/unix/syscall_linux.go +++ b/vendor/golang.org/x/sys/unix/syscall_linux.go @@ -512,24 +512,24 @@ func (sa *SockaddrL2) sockaddr() (unsafe.Pointer, _Socklen, error) { // // Server example: // -// fd, _ := Socket(AF_BLUETOOTH, SOCK_STREAM, BTPROTO_RFCOMM) -// _ = unix.Bind(fd, &unix.SockaddrRFCOMM{ -// Channel: 1, -// Addr: [6]uint8{0, 0, 0, 0, 0, 0}, // BDADDR_ANY or 00:00:00:00:00:00 -// }) -// _ = Listen(fd, 1) -// nfd, sa, _ := Accept(fd) -// fmt.Printf("conn addr=%v fd=%d", sa.(*unix.SockaddrRFCOMM).Addr, nfd) -// Read(nfd, buf) +// fd, _ := Socket(AF_BLUETOOTH, SOCK_STREAM, BTPROTO_RFCOMM) +// _ = unix.Bind(fd, &unix.SockaddrRFCOMM{ +// Channel: 1, +// Addr: [6]uint8{0, 0, 0, 0, 0, 0}, // BDADDR_ANY or 00:00:00:00:00:00 +// }) +// _ = Listen(fd, 1) +// nfd, sa, _ := Accept(fd) +// fmt.Printf("conn addr=%v fd=%d", sa.(*unix.SockaddrRFCOMM).Addr, nfd) +// Read(nfd, buf) // // Client example: // -// fd, _ := Socket(AF_BLUETOOTH, SOCK_STREAM, BTPROTO_RFCOMM) -// _ = Connect(fd, &SockaddrRFCOMM{ -// Channel: 1, -// Addr: [6]byte{0x11, 0x22, 0x33, 0xaa, 0xbb, 0xcc}, // CC:BB:AA:33:22:11 -// }) -// Write(fd, []byte(`hello`)) +// fd, _ := Socket(AF_BLUETOOTH, SOCK_STREAM, BTPROTO_RFCOMM) +// _ = Connect(fd, &SockaddrRFCOMM{ +// Channel: 1, +// Addr: [6]byte{0x11, 0x22, 0x33, 0xaa, 0xbb, 0xcc}, 
// CC:BB:AA:33:22:11 +// }) +// Write(fd, []byte(`hello`)) type SockaddrRFCOMM struct { // Addr represents a bluetooth address, byte ordering is little-endian. Addr [6]uint8 @@ -556,12 +556,12 @@ func (sa *SockaddrRFCOMM) sockaddr() (unsafe.Pointer, _Socklen, error) { // The SockaddrCAN struct must be bound to the socket file descriptor // using Bind before the CAN socket can be used. // -// // Read one raw CAN frame -// fd, _ := Socket(AF_CAN, SOCK_RAW, CAN_RAW) -// addr := &SockaddrCAN{Ifindex: index} -// Bind(fd, addr) -// frame := make([]byte, 16) -// Read(fd, frame) +// // Read one raw CAN frame +// fd, _ := Socket(AF_CAN, SOCK_RAW, CAN_RAW) +// addr := &SockaddrCAN{Ifindex: index} +// Bind(fd, addr) +// frame := make([]byte, 16) +// Read(fd, frame) // // The full SocketCAN documentation can be found in the linux kernel // archives at: https://www.kernel.org/doc/Documentation/networking/can.txt @@ -632,13 +632,13 @@ func (sa *SockaddrCANJ1939) sockaddr() (unsafe.Pointer, _Socklen, error) { // Here is an example of using an AF_ALG socket with SHA1 hashing. // The initial socket setup process is as follows: // -// // Open a socket to perform SHA1 hashing. -// fd, _ := unix.Socket(unix.AF_ALG, unix.SOCK_SEQPACKET, 0) -// addr := &unix.SockaddrALG{Type: "hash", Name: "sha1"} -// unix.Bind(fd, addr) -// // Note: unix.Accept does not work at this time; must invoke accept() -// // manually using unix.Syscall. -// hashfd, _, _ := unix.Syscall(unix.SYS_ACCEPT, uintptr(fd), 0, 0) +// // Open a socket to perform SHA1 hashing. +// fd, _ := unix.Socket(unix.AF_ALG, unix.SOCK_SEQPACKET, 0) +// addr := &unix.SockaddrALG{Type: "hash", Name: "sha1"} +// unix.Bind(fd, addr) +// // Note: unix.Accept does not work at this time; must invoke accept() +// // manually using unix.Syscall. +// hashfd, _, _ := unix.Syscall(unix.SYS_ACCEPT, uintptr(fd), 0, 0) // // Once a file descriptor has been returned from Accept, it may be used to // perform SHA1 hashing. The descriptor is not safe for concurrent use, but @@ -647,39 +647,39 @@ func (sa *SockaddrCANJ1939) sockaddr() (unsafe.Pointer, _Socklen, error) { // When hashing a small byte slice or string, a single Write and Read may // be used: // -// // Assume hashfd is already configured using the setup process. -// hash := os.NewFile(hashfd, "sha1") -// // Hash an input string and read the results. Each Write discards -// // previous hash state. Read always reads the current state. -// b := make([]byte, 20) -// for i := 0; i < 2; i++ { -// io.WriteString(hash, "Hello, world.") -// hash.Read(b) -// fmt.Println(hex.EncodeToString(b)) -// } -// // Output: -// // 2ae01472317d1935a84797ec1983ae243fc6aa28 -// // 2ae01472317d1935a84797ec1983ae243fc6aa28 +// // Assume hashfd is already configured using the setup process. +// hash := os.NewFile(hashfd, "sha1") +// // Hash an input string and read the results. Each Write discards +// // previous hash state. Read always reads the current state. +// b := make([]byte, 20) +// for i := 0; i < 2; i++ { +// io.WriteString(hash, "Hello, world.") +// hash.Read(b) +// fmt.Println(hex.EncodeToString(b)) +// } +// // Output: +// // 2ae01472317d1935a84797ec1983ae243fc6aa28 +// // 2ae01472317d1935a84797ec1983ae243fc6aa28 // // For hashing larger byte slices, or byte streams such as those read from // a file or socket, use Sendto with MSG_MORE to instruct the kernel to update // the hash digest instead of creating a new one for a given chunk and finalizing it. 
// -// // Assume hashfd and addr are already configured using the setup process. -// hash := os.NewFile(hashfd, "sha1") -// // Hash the contents of a file. -// f, _ := os.Open("/tmp/linux-4.10-rc7.tar.xz") -// b := make([]byte, 4096) -// for { -// n, err := f.Read(b) -// if err == io.EOF { -// break -// } -// unix.Sendto(hashfd, b[:n], unix.MSG_MORE, addr) -// } -// hash.Read(b) -// fmt.Println(hex.EncodeToString(b)) -// // Output: 85cdcad0c06eef66f805ecce353bec9accbeecc5 +// // Assume hashfd and addr are already configured using the setup process. +// hash := os.NewFile(hashfd, "sha1") +// // Hash the contents of a file. +// f, _ := os.Open("/tmp/linux-4.10-rc7.tar.xz") +// b := make([]byte, 4096) +// for { +// n, err := f.Read(b) +// if err == io.EOF { +// break +// } +// unix.Sendto(hashfd, b[:n], unix.MSG_MORE, addr) +// } +// hash.Read(b) +// fmt.Println(hex.EncodeToString(b)) +// // Output: 85cdcad0c06eef66f805ecce353bec9accbeecc5 // // For more information, see: http://www.chronox.de/crypto-API/crypto/userspace-if.html. type SockaddrALG struct { @@ -1499,18 +1499,13 @@ func KeyctlRestrictKeyring(ringid int, keyType string, restriction string) error //sys keyctlRestrictKeyringByType(cmd int, arg2 int, keyType string, restriction string) (err error) = SYS_KEYCTL //sys keyctlRestrictKeyring(cmd int, arg2 int) (err error) = SYS_KEYCTL -func recvmsgRaw(fd int, p, oob []byte, flags int, rsa *RawSockaddrAny) (n, oobn int, recvflags int, err error) { +func recvmsgRaw(fd int, iov []Iovec, oob []byte, flags int, rsa *RawSockaddrAny) (n, oobn int, recvflags int, err error) { var msg Msghdr msg.Name = (*byte)(unsafe.Pointer(rsa)) msg.Namelen = uint32(SizeofSockaddrAny) - var iov Iovec - if len(p) > 0 { - iov.Base = &p[0] - iov.SetLen(len(p)) - } var dummy byte if len(oob) > 0 { - if len(p) == 0 { + if emptyIovecs(iov) { var sockType int sockType, err = GetsockoptInt(fd, SOL_SOCKET, SO_TYPE) if err != nil { @@ -1518,15 +1513,19 @@ func recvmsgRaw(fd int, p, oob []byte, flags int, rsa *RawSockaddrAny) (n, oobn } // receive at least one normal byte if sockType != SOCK_DGRAM { - iov.Base = &dummy - iov.SetLen(1) + var iova [1]Iovec + iova[0].Base = &dummy + iova[0].SetLen(1) + iov = iova[:] } } msg.Control = &oob[0] msg.SetControllen(len(oob)) } - msg.Iov = &iov - msg.Iovlen = 1 + if len(iov) > 0 { + msg.Iov = &iov[0] + msg.SetIovlen(len(iov)) + } if n, err = recvmsg(fd, &msg, flags); err != nil { return } @@ -1535,18 +1534,15 @@ func recvmsgRaw(fd int, p, oob []byte, flags int, rsa *RawSockaddrAny) (n, oobn return } -func sendmsgN(fd int, p, oob []byte, ptr unsafe.Pointer, salen _Socklen, flags int) (n int, err error) { +func sendmsgN(fd int, iov []Iovec, oob []byte, ptr unsafe.Pointer, salen _Socklen, flags int) (n int, err error) { var msg Msghdr msg.Name = (*byte)(ptr) msg.Namelen = uint32(salen) - var iov Iovec - if len(p) > 0 { - iov.Base = &p[0] - iov.SetLen(len(p)) - } var dummy byte + var empty bool if len(oob) > 0 { - if len(p) == 0 { + empty := emptyIovecs(iov) + if empty { var sockType int sockType, err = GetsockoptInt(fd, SOL_SOCKET, SO_TYPE) if err != nil { @@ -1554,19 +1550,22 @@ func sendmsgN(fd int, p, oob []byte, ptr unsafe.Pointer, salen _Socklen, flags i } // send at least one normal byte if sockType != SOCK_DGRAM { - iov.Base = &dummy - iov.SetLen(1) + var iova [1]Iovec + iova[0].Base = &dummy + iova[0].SetLen(1) } } msg.Control = &oob[0] msg.SetControllen(len(oob)) } - msg.Iov = &iov - msg.Iovlen = 1 + if len(iov) > 0 { + msg.Iov = &iov[0] + msg.SetIovlen(len(iov)) + } if n, 
err = sendmsg(fd, &msg, flags); err != nil { return 0, err } - if len(oob) > 0 && len(p) == 0 { + if len(oob) > 0 && empty { n = 0 } return n, nil diff --git a/vendor/golang.org/x/sys/unix/syscall_linux_loong64.go b/vendor/golang.org/x/sys/unix/syscall_linux_loong64.go new file mode 100644 index 000000000..0b69c3eff --- /dev/null +++ b/vendor/golang.org/x/sys/unix/syscall_linux_loong64.go @@ -0,0 +1,226 @@ +// Copyright 2022 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build loong64 && linux +// +build loong64,linux + +package unix + +import "unsafe" + +//sys EpollWait(epfd int, events []EpollEvent, msec int) (n int, err error) = SYS_EPOLL_PWAIT +//sys Fadvise(fd int, offset int64, length int64, advice int) (err error) = SYS_FADVISE64 +//sys Fchown(fd int, uid int, gid int) (err error) +//sys Fstatfs(fd int, buf *Statfs_t) (err error) +//sys Ftruncate(fd int, length int64) (err error) +//sysnb Getegid() (egid int) +//sysnb Geteuid() (euid int) +//sysnb Getgid() (gid int) +//sysnb Getuid() (uid int) +//sys Listen(s int, n int) (err error) +//sys pread(fd int, p []byte, offset int64) (n int, err error) = SYS_PREAD64 +//sys pwrite(fd int, p []byte, offset int64) (n int, err error) = SYS_PWRITE64 +//sys Seek(fd int, offset int64, whence int) (off int64, err error) = SYS_LSEEK + +func Select(nfd int, r *FdSet, w *FdSet, e *FdSet, timeout *Timeval) (n int, err error) { + var ts *Timespec + if timeout != nil { + ts = &Timespec{Sec: timeout.Sec, Nsec: timeout.Usec * 1000} + } + return Pselect(nfd, r, w, e, ts, nil) +} + +//sys sendfile(outfd int, infd int, offset *int64, count int) (written int, err error) +//sys setfsgid(gid int) (prev int, err error) +//sys setfsuid(uid int) (prev int, err error) +//sysnb Setregid(rgid int, egid int) (err error) +//sysnb Setresgid(rgid int, egid int, sgid int) (err error) +//sysnb Setresuid(ruid int, euid int, suid int) (err error) +//sysnb Setreuid(ruid int, euid int) (err error) +//sys Shutdown(fd int, how int) (err error) +//sys Splice(rfd int, roff *int64, wfd int, woff *int64, len int, flags int) (n int64, err error) + +func timespecFromStatxTimestamp(x StatxTimestamp) Timespec { + return Timespec{ + Sec: x.Sec, + Nsec: int64(x.Nsec), + } +} + +func Fstatat(fd int, path string, stat *Stat_t, flags int) error { + var r Statx_t + // Do it the glibc way, add AT_NO_AUTOMOUNT. + if err := Statx(fd, path, AT_NO_AUTOMOUNT|flags, STATX_BASIC_STATS, &r); err != nil { + return err + } + + stat.Dev = Mkdev(r.Dev_major, r.Dev_minor) + stat.Ino = r.Ino + stat.Mode = uint32(r.Mode) + stat.Nlink = r.Nlink + stat.Uid = r.Uid + stat.Gid = r.Gid + stat.Rdev = Mkdev(r.Rdev_major, r.Rdev_minor) + // hope we don't get to process files so large to overflow these size + // fields... 
+ stat.Size = int64(r.Size) + stat.Blksize = int32(r.Blksize) + stat.Blocks = int64(r.Blocks) + stat.Atim = timespecFromStatxTimestamp(r.Atime) + stat.Mtim = timespecFromStatxTimestamp(r.Mtime) + stat.Ctim = timespecFromStatxTimestamp(r.Ctime) + + return nil +} + +func Fstat(fd int, stat *Stat_t) (err error) { + return Fstatat(fd, "", stat, AT_EMPTY_PATH) +} + +func Stat(path string, stat *Stat_t) (err error) { + return Fstatat(AT_FDCWD, path, stat, 0) +} + +func Lchown(path string, uid int, gid int) (err error) { + return Fchownat(AT_FDCWD, path, uid, gid, AT_SYMLINK_NOFOLLOW) +} + +func Lstat(path string, stat *Stat_t) (err error) { + return Fstatat(AT_FDCWD, path, stat, AT_SYMLINK_NOFOLLOW) +} + +//sys Statfs(path string, buf *Statfs_t) (err error) +//sys SyncFileRange(fd int, off int64, n int64, flags int) (err error) +//sys Truncate(path string, length int64) (err error) + +func Ustat(dev int, ubuf *Ustat_t) (err error) { + return ENOSYS +} + +//sys accept4(s int, rsa *RawSockaddrAny, addrlen *_Socklen, flags int) (fd int, err error) +//sys bind(s int, addr unsafe.Pointer, addrlen _Socklen) (err error) +//sys connect(s int, addr unsafe.Pointer, addrlen _Socklen) (err error) +//sysnb getgroups(n int, list *_Gid_t) (nn int, err error) +//sysnb setgroups(n int, list *_Gid_t) (err error) +//sys getsockopt(s int, level int, name int, val unsafe.Pointer, vallen *_Socklen) (err error) +//sys setsockopt(s int, level int, name int, val unsafe.Pointer, vallen uintptr) (err error) +//sysnb socket(domain int, typ int, proto int) (fd int, err error) +//sysnb socketpair(domain int, typ int, proto int, fd *[2]int32) (err error) +//sysnb getpeername(fd int, rsa *RawSockaddrAny, addrlen *_Socklen) (err error) +//sysnb getsockname(fd int, rsa *RawSockaddrAny, addrlen *_Socklen) (err error) +//sys recvfrom(fd int, p []byte, flags int, from *RawSockaddrAny, fromlen *_Socklen) (n int, err error) +//sys sendto(s int, buf []byte, flags int, to unsafe.Pointer, addrlen _Socklen) (err error) +//sys recvmsg(s int, msg *Msghdr, flags int) (n int, err error) +//sys sendmsg(s int, msg *Msghdr, flags int) (n int, err error) +//sys mmap(addr uintptr, length uintptr, prot int, flags int, fd int, offset int64) (xaddr uintptr, err error) + +//sysnb Gettimeofday(tv *Timeval) (err error) + +func setTimespec(sec, nsec int64) Timespec { + return Timespec{Sec: sec, Nsec: nsec} +} + +func setTimeval(sec, usec int64) Timeval { + return Timeval{Sec: sec, Usec: usec} +} + +func Getrlimit(resource int, rlim *Rlimit) (err error) { + err = Prlimit(0, resource, nil, rlim) + return +} + +func Setrlimit(resource int, rlim *Rlimit) (err error) { + err = Prlimit(0, resource, rlim, nil) + return +} + +func futimesat(dirfd int, path string, tv *[2]Timeval) (err error) { + if tv == nil { + return utimensat(dirfd, path, nil, 0) + } + + ts := []Timespec{ + NsecToTimespec(TimevalToNsec(tv[0])), + NsecToTimespec(TimevalToNsec(tv[1])), + } + return utimensat(dirfd, path, (*[2]Timespec)(unsafe.Pointer(&ts[0])), 0) +} + +func Time(t *Time_t) (Time_t, error) { + var tv Timeval + err := Gettimeofday(&tv) + if err != nil { + return 0, err + } + if t != nil { + *t = Time_t(tv.Sec) + } + return Time_t(tv.Sec), nil +} + +func Utime(path string, buf *Utimbuf) error { + tv := []Timeval{ + {Sec: buf.Actime}, + {Sec: buf.Modtime}, + } + return Utimes(path, tv) +} + +func utimes(path string, tv *[2]Timeval) (err error) { + if tv == nil { + return utimensat(AT_FDCWD, path, nil, 0) + } + + ts := []Timespec{ + NsecToTimespec(TimevalToNsec(tv[0])), + 
NsecToTimespec(TimevalToNsec(tv[1])), + } + return utimensat(AT_FDCWD, path, (*[2]Timespec)(unsafe.Pointer(&ts[0])), 0) +} + +func (r *PtraceRegs) PC() uint64 { return r.Era } + +func (r *PtraceRegs) SetPC(era uint64) { r.Era = era } + +func (iov *Iovec) SetLen(length int) { + iov.Len = uint64(length) +} + +func (msghdr *Msghdr) SetControllen(length int) { + msghdr.Controllen = uint64(length) +} + +func (msghdr *Msghdr) SetIovlen(length int) { + msghdr.Iovlen = uint64(length) +} + +func (cmsg *Cmsghdr) SetLen(length int) { + cmsg.Len = uint64(length) +} + +func (rsa *RawSockaddrNFCLLCP) SetServiceNameLen(length int) { + rsa.Service_name_len = uint64(length) +} + +func Pause() error { + _, err := ppoll(nil, 0, nil, nil) + return err +} + +func Renameat(olddirfd int, oldpath string, newdirfd int, newpath string) (err error) { + return Renameat2(olddirfd, oldpath, newdirfd, newpath, 0) +} + +//sys kexecFileLoad(kernelFd int, initrdFd int, cmdlineLen int, cmdline string, flags int) (err error) + +func KexecFileLoad(kernelFd int, initrdFd int, cmdline string, flags int) error { + cmdlineLen := len(cmdline) + if cmdlineLen > 0 { + // Account for the additional NULL byte added by + // BytePtrFromString in kexecFileLoad. The kexec_file_load + // syscall expects a NULL-terminated string. + cmdlineLen++ + } + return kexecFileLoad(kernelFd, initrdFd, cmdlineLen, cmdline, flags) +} diff --git a/vendor/golang.org/x/sys/unix/syscall_linux_riscv64.go b/vendor/golang.org/x/sys/unix/syscall_linux_riscv64.go index 8ff7adba0..925a748a3 100644 --- a/vendor/golang.org/x/sys/unix/syscall_linux_riscv64.go +++ b/vendor/golang.org/x/sys/unix/syscall_linux_riscv64.go @@ -22,6 +22,7 @@ import "unsafe" //sysnb Getrlimit(resource int, rlim *Rlimit) (err error) //sysnb Getuid() (uid int) //sys Listen(s int, n int) (err error) +//sys MemfdSecret(flags int) (fd int, err error) //sys pread(fd int, p []byte, offset int64) (n int, err error) = SYS_PREAD64 //sys pwrite(fd int, p []byte, offset int64) (n int, err error) = SYS_PWRITE64 //sys Seek(fd int, offset int64, whence int) (off int64, err error) = SYS_LSEEK diff --git a/vendor/golang.org/x/sys/unix/syscall_openbsd.go b/vendor/golang.org/x/sys/unix/syscall_openbsd.go index 15d637d63..78daceb33 100644 --- a/vendor/golang.org/x/sys/unix/syscall_openbsd.go +++ b/vendor/golang.org/x/sys/unix/syscall_openbsd.go @@ -81,6 +81,7 @@ func Pipe(p []int) (err error) { } //sysnb pipe2(p *[2]_C_int, flags int) (err error) + func Pipe2(p []int, flags int) error { if len(p) != 2 { return EINVAL @@ -95,6 +96,7 @@ func Pipe2(p []int, flags int) error { } //sys Getdents(fd int, buf []byte) (n int, err error) + func Getdirentries(fd int, buf []byte, basep *uintptr) (n int, err error) { n, err = Getdents(fd, buf) if err != nil || basep == nil { diff --git a/vendor/golang.org/x/sys/unix/syscall_solaris.go b/vendor/golang.org/x/sys/unix/syscall_solaris.go index 5c2003cec..b5ec457cd 100644 --- a/vendor/golang.org/x/sys/unix/syscall_solaris.go +++ b/vendor/golang.org/x/sys/unix/syscall_solaris.go @@ -451,26 +451,25 @@ func Accept(fd int) (nfd int, sa Sockaddr, err error) { //sys recvmsg(s int, msg *Msghdr, flags int) (n int, err error) = libsocket.__xnet_recvmsg -func recvmsgRaw(fd int, p, oob []byte, flags int, rsa *RawSockaddrAny) (n, oobn int, recvflags int, err error) { +func recvmsgRaw(fd int, iov []Iovec, oob []byte, flags int, rsa *RawSockaddrAny) (n, oobn int, recvflags int, err error) { var msg Msghdr msg.Name = (*byte)(unsafe.Pointer(rsa)) msg.Namelen = uint32(SizeofSockaddrAny) - var 
iov Iovec - if len(p) > 0 { - iov.Base = (*int8)(unsafe.Pointer(&p[0])) - iov.SetLen(len(p)) - } - var dummy int8 + var dummy byte if len(oob) > 0 { // receive at least one normal byte - if len(p) == 0 { - iov.Base = &dummy - iov.SetLen(1) + if emptyIovecs(iov) { + var iova [1]Iovec + iova[0].Base = &dummy + iova[0].SetLen(1) + iov = iova[:] } msg.Accrightslen = int32(len(oob)) } - msg.Iov = &iov - msg.Iovlen = 1 + if len(iov) > 0 { + msg.Iov = &iov[0] + msg.SetIovlen(len(iov)) + } if n, err = recvmsg(fd, &msg, flags); n == -1 { return } @@ -480,30 +479,31 @@ func recvmsgRaw(fd int, p, oob []byte, flags int, rsa *RawSockaddrAny) (n, oobn //sys sendmsg(s int, msg *Msghdr, flags int) (n int, err error) = libsocket.__xnet_sendmsg -func sendmsgN(fd int, p, oob []byte, ptr unsafe.Pointer, salen _Socklen, flags int) (n int, err error) { +func sendmsgN(fd int, iov []Iovec, oob []byte, ptr unsafe.Pointer, salen _Socklen, flags int) (n int, err error) { var msg Msghdr msg.Name = (*byte)(unsafe.Pointer(ptr)) msg.Namelen = uint32(salen) - var iov Iovec - if len(p) > 0 { - iov.Base = (*int8)(unsafe.Pointer(&p[0])) - iov.SetLen(len(p)) - } - var dummy int8 + var dummy byte + var empty bool if len(oob) > 0 { // send at least one normal byte - if len(p) == 0 { - iov.Base = &dummy - iov.SetLen(1) + empty = emptyIovecs(iov) + if empty { + var iova [1]Iovec + iova[0].Base = &dummy + iova[0].SetLen(1) + iov = iova[:] } msg.Accrightslen = int32(len(oob)) } - msg.Iov = &iov - msg.Iovlen = 1 + if len(iov) > 0 { + msg.Iov = &iov[0] + msg.SetIovlen(len(iov)) + } if n, err = sendmsg(fd, &msg, flags); err != nil { return 0, err } - if len(oob) > 0 && len(p) == 0 { + if len(oob) > 0 && empty { n = 0 } return n, nil @@ -618,6 +618,7 @@ func Sendfile(outfd int, infd int, offset *int64, count int) (written int, err e //sys Getpriority(which int, who int) (n int, err error) //sysnb Getrlimit(which int, lim *Rlimit) (err error) //sysnb Getrusage(who int, rusage *Rusage) (err error) +//sysnb Getsid(pid int) (sid int, err error) //sysnb Gettimeofday(tv *Timeval) (err error) //sysnb Getuid() (uid int) //sys Kill(pid int, signum syscall.Signal) (err error) diff --git a/vendor/golang.org/x/sys/unix/syscall_unix.go b/vendor/golang.org/x/sys/unix/syscall_unix.go index 70508afc1..1ff5060b5 100644 --- a/vendor/golang.org/x/sys/unix/syscall_unix.go +++ b/vendor/golang.org/x/sys/unix/syscall_unix.go @@ -338,8 +338,13 @@ func Recvfrom(fd int, p []byte, flags int) (n int, from Sockaddr, err error) { } func Recvmsg(fd int, p, oob []byte, flags int) (n, oobn int, recvflags int, from Sockaddr, err error) { + var iov [1]Iovec + if len(p) > 0 { + iov[0].Base = &p[0] + iov[0].SetLen(len(p)) + } var rsa RawSockaddrAny - n, oobn, recvflags, err = recvmsgRaw(fd, p, oob, flags, &rsa) + n, oobn, recvflags, err = recvmsgRaw(fd, iov[:], oob, flags, &rsa) // source address is only specified if the socket is unconnected if rsa.Addr.Family != AF_UNSPEC { from, err = anyToSockaddr(fd, &rsa) @@ -347,12 +352,67 @@ func Recvmsg(fd int, p, oob []byte, flags int) (n, oobn int, recvflags int, from return } +// RecvmsgBuffers receives a message from a socket using the recvmsg +// system call. The flags are passed to recvmsg. Any non-control data +// read is scattered into the buffers slices. 
The results are: +// - n is the number of non-control data read into bufs +// - oobn is the number of control data read into oob; this may be interpreted using [ParseSocketControlMessage] +// - recvflags is flags returned by recvmsg +// - from is the address of the sender +func RecvmsgBuffers(fd int, buffers [][]byte, oob []byte, flags int) (n, oobn int, recvflags int, from Sockaddr, err error) { + iov := make([]Iovec, len(buffers)) + for i := range buffers { + if len(buffers[i]) > 0 { + iov[i].Base = &buffers[i][0] + iov[i].SetLen(len(buffers[i])) + } else { + iov[i].Base = (*byte)(unsafe.Pointer(&_zero)) + } + } + var rsa RawSockaddrAny + n, oobn, recvflags, err = recvmsgRaw(fd, iov, oob, flags, &rsa) + if err == nil && rsa.Addr.Family != AF_UNSPEC { + from, err = anyToSockaddr(fd, &rsa) + } + return +} + func Sendmsg(fd int, p, oob []byte, to Sockaddr, flags int) (err error) { _, err = SendmsgN(fd, p, oob, to, flags) return } func SendmsgN(fd int, p, oob []byte, to Sockaddr, flags int) (n int, err error) { + var iov [1]Iovec + if len(p) > 0 { + iov[0].Base = &p[0] + iov[0].SetLen(len(p)) + } + var ptr unsafe.Pointer + var salen _Socklen + if to != nil { + ptr, salen, err = to.sockaddr() + if err != nil { + return 0, err + } + } + return sendmsgN(fd, iov[:], oob, ptr, salen, flags) +} + +// SendmsgBuffers sends a message on a socket to an address using the sendmsg +// system call. The flags are passed to sendmsg. Any non-control data written +// is gathered from buffers. The function returns the number of bytes written +// to the socket. +func SendmsgBuffers(fd int, buffers [][]byte, oob []byte, to Sockaddr, flags int) (n int, err error) { + iov := make([]Iovec, len(buffers)) + for i := range buffers { + if len(buffers[i]) > 0 { + iov[i].Base = &buffers[i][0] + iov[i].SetLen(len(buffers[i])) + } else { + iov[i].Base = (*byte)(unsafe.Pointer(&_zero)) + } + } var ptr unsafe.Pointer var salen _Socklen if to != nil { @@ -361,7 +421,7 @@ func SendmsgN(fd int, p, oob []byte, to Sockaddr, flags int) (n int, err error) return 0, err } } - return sendmsgN(fd, p, oob, ptr, salen, flags) + return sendmsgN(fd, iov, oob, ptr, salen, flags) } func Send(s int, buf []byte, flags int) (err error) { @@ -484,3 +544,13 @@ func Lutimes(path string, tv []Timeval) error { } return UtimesNanoAt(AT_FDCWD, path, ts, AT_SYMLINK_NOFOLLOW) } + +// emptyIovec reports whether there are no bytes in the slice of Iovec. 
+func emptyIovecs(iov []Iovec) bool { + for i := range iov { + if iov[i].Len > 0 { + return false + } + } + return true +} diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux.go b/vendor/golang.org/x/sys/unix/zerrors_linux.go index 3de79fa25..dfa9bd938 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux.go @@ -184,6 +184,7 @@ const ( BPF_F_ALLOW_MULTI = 0x2 BPF_F_ALLOW_OVERRIDE = 0x1 BPF_F_ANY_ALIGNMENT = 0x2 + BPF_F_KPROBE_MULTI_RETURN = 0x1 BPF_F_QUERY_EFFECTIVE = 0x1 BPF_F_REPLACE = 0x4 BPF_F_SLEEPABLE = 0x10 @@ -191,6 +192,8 @@ const ( BPF_F_TEST_RND_HI32 = 0x4 BPF_F_TEST_RUN_ON_CPU = 0x1 BPF_F_TEST_STATE_FREQ = 0x8 + BPF_F_TEST_XDP_LIVE_FRAMES = 0x2 + BPF_F_XDP_HAS_FRAGS = 0x20 BPF_H = 0x8 BPF_IMM = 0x0 BPF_IND = 0x40 @@ -517,9 +520,9 @@ const ( DM_UUID_FLAG = 0x4000 DM_UUID_LEN = 0x81 DM_VERSION = 0xc138fd00 - DM_VERSION_EXTRA = "-ioctl (2021-03-22)" + DM_VERSION_EXTRA = "-ioctl (2022-02-22)" DM_VERSION_MAJOR = 0x4 - DM_VERSION_MINOR = 0x2d + DM_VERSION_MINOR = 0x2e DM_VERSION_PATCHLEVEL = 0x0 DT_BLK = 0x6 DT_CHR = 0x2 @@ -712,6 +715,7 @@ const ( ETH_P_EDSA = 0xdada ETH_P_ERSPAN = 0x88be ETH_P_ERSPAN2 = 0x22eb + ETH_P_ETHERCAT = 0x88a4 ETH_P_FCOE = 0x8906 ETH_P_FIP = 0x8914 ETH_P_HDLC = 0x19 @@ -749,6 +753,7 @@ const ( ETH_P_PPP_MP = 0x8 ETH_P_PPP_SES = 0x8864 ETH_P_PREAUTH = 0x88c7 + ETH_P_PROFINET = 0x8892 ETH_P_PRP = 0x88fb ETH_P_PUP = 0x200 ETH_P_PUPAT = 0x201 @@ -837,6 +842,7 @@ const ( FAN_FS_ERROR = 0x8000 FAN_MARK_ADD = 0x1 FAN_MARK_DONT_FOLLOW = 0x4 + FAN_MARK_EVICTABLE = 0x200 FAN_MARK_FILESYSTEM = 0x100 FAN_MARK_FLUSH = 0x80 FAN_MARK_IGNORED_MASK = 0x20 @@ -1055,7 +1061,7 @@ const ( IFA_F_STABLE_PRIVACY = 0x800 IFA_F_TEMPORARY = 0x1 IFA_F_TENTATIVE = 0x40 - IFA_MAX = 0xa + IFA_MAX = 0xb IFF_ALLMULTI = 0x200 IFF_ATTACH_QUEUE = 0x200 IFF_AUTOMEDIA = 0x4000 @@ -1310,6 +1316,7 @@ const ( KEXEC_ARCH_ARM = 0x280000 KEXEC_ARCH_DEFAULT = 0x0 KEXEC_ARCH_IA_64 = 0x320000 + KEXEC_ARCH_LOONGARCH = 0x1020000 KEXEC_ARCH_MASK = 0xffff0000 KEXEC_ARCH_MIPS = 0x80000 KEXEC_ARCH_MIPS_LE = 0xa0000 @@ -1402,6 +1409,7 @@ const ( LANDLOCK_ACCESS_FS_MAKE_SYM = 0x1000 LANDLOCK_ACCESS_FS_READ_DIR = 0x8 LANDLOCK_ACCESS_FS_READ_FILE = 0x4 + LANDLOCK_ACCESS_FS_REFER = 0x2000 LANDLOCK_ACCESS_FS_REMOVE_DIR = 0x10 LANDLOCK_ACCESS_FS_REMOVE_FILE = 0x20 LANDLOCK_ACCESS_FS_WRITE_FILE = 0x2 @@ -1757,6 +1765,7 @@ const ( NLM_F_ACK_TLVS = 0x200 NLM_F_APPEND = 0x800 NLM_F_ATOMIC = 0x400 + NLM_F_BULK = 0x200 NLM_F_CAPPED = 0x100 NLM_F_CREATE = 0x400 NLM_F_DUMP = 0x300 @@ -2074,6 +2083,11 @@ const ( PR_SET_UNALIGN = 0x6 PR_SET_VMA = 0x53564d41 PR_SET_VMA_ANON_NAME = 0x0 + PR_SME_GET_VL = 0x40 + PR_SME_SET_VL = 0x3f + PR_SME_SET_VL_ONEXEC = 0x40000 + PR_SME_VL_INHERIT = 0x20000 + PR_SME_VL_LEN_MASK = 0xffff PR_SPEC_DISABLE = 0x4 PR_SPEC_DISABLE_NOEXEC = 0x10 PR_SPEC_ENABLE = 0x2 @@ -2226,8 +2240,9 @@ const ( RTC_FEATURE_ALARM = 0x0 RTC_FEATURE_ALARM_RES_2S = 0x3 RTC_FEATURE_ALARM_RES_MINUTE = 0x1 + RTC_FEATURE_ALARM_WAKEUP_ONLY = 0x7 RTC_FEATURE_BACKUP_SWITCH_MODE = 0x6 - RTC_FEATURE_CNT = 0x7 + RTC_FEATURE_CNT = 0x8 RTC_FEATURE_CORRECTION = 0x5 RTC_FEATURE_NEED_WEEK_DAY = 0x2 RTC_FEATURE_UPDATE_INTERRUPT = 0x4 @@ -2301,6 +2316,7 @@ const ( RTM_DELRULE = 0x21 RTM_DELTCLASS = 0x29 RTM_DELTFILTER = 0x2d + RTM_DELTUNNEL = 0x79 RTM_DELVLAN = 0x71 RTM_F_CLONED = 0x200 RTM_F_EQUALIZE = 0x400 @@ -2333,8 +2349,9 @@ const ( RTM_GETSTATS = 0x5e RTM_GETTCLASS = 0x2a RTM_GETTFILTER = 0x2e + RTM_GETTUNNEL = 0x7a RTM_GETVLAN = 0x72 - RTM_MAX = 0x77 + RTM_MAX = 0x7b 
RTM_NEWACTION = 0x30 RTM_NEWADDR = 0x14 RTM_NEWADDRLABEL = 0x48 @@ -2358,11 +2375,13 @@ const ( RTM_NEWSTATS = 0x5c RTM_NEWTCLASS = 0x28 RTM_NEWTFILTER = 0x2c - RTM_NR_FAMILIES = 0x1a - RTM_NR_MSGTYPES = 0x68 + RTM_NEWTUNNEL = 0x78 + RTM_NR_FAMILIES = 0x1b + RTM_NR_MSGTYPES = 0x6c RTM_SETDCB = 0x4f RTM_SETLINK = 0x13 RTM_SETNEIGHTBL = 0x43 + RTM_SETSTATS = 0x5f RTNH_ALIGNTO = 0x4 RTNH_COMPARE_MASK = 0x59 RTNH_F_DEAD = 0x1 @@ -2543,6 +2562,9 @@ const ( SOCK_RDM = 0x4 SOCK_SEQPACKET = 0x5 SOCK_SNDBUF_LOCK = 0x1 + SOCK_TXREHASH_DEFAULT = 0xff + SOCK_TXREHASH_DISABLED = 0x0 + SOCK_TXREHASH_ENABLED = 0x1 SOL_AAL = 0x109 SOL_ALG = 0x117 SOL_ATM = 0x108 @@ -2558,6 +2580,8 @@ const ( SOL_IUCV = 0x115 SOL_KCM = 0x119 SOL_LLC = 0x10c + SOL_MCTP = 0x11d + SOL_MPTCP = 0x11c SOL_NETBEUI = 0x10b SOL_NETLINK = 0x10e SOL_NFC = 0x118 @@ -2673,7 +2697,7 @@ const ( TASKSTATS_GENL_NAME = "TASKSTATS" TASKSTATS_GENL_VERSION = 0x1 TASKSTATS_TYPE_MAX = 0x6 - TASKSTATS_VERSION = 0xb + TASKSTATS_VERSION = 0xd TCIFLUSH = 0x0 TCIOFF = 0x2 TCIOFLUSH = 0x2 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_386.go b/vendor/golang.org/x/sys/unix/zerrors_linux_386.go index 234fd4a5d..274e2dabd 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_386.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_386.go @@ -5,7 +5,7 @@ // +build 386,linux // Code generated by cmd/cgo -godefs; DO NOT EDIT. -// cgo -godefs -- -Wall -Werror -static -I/tmp/include -m32 /build/unix/_const.go +// cgo -godefs -- -Wall -Werror -static -I/tmp/include -m32 _const.go package unix @@ -326,6 +326,7 @@ const ( SO_RCVBUF = 0x8 SO_RCVBUFFORCE = 0x21 SO_RCVLOWAT = 0x12 + SO_RCVMARK = 0x4b SO_RCVTIMEO = 0x14 SO_RCVTIMEO_NEW = 0x42 SO_RCVTIMEO_OLD = 0x14 @@ -350,6 +351,7 @@ const ( SO_TIMESTAMPNS_NEW = 0x40 SO_TIMESTAMPNS_OLD = 0x23 SO_TIMESTAMP_NEW = 0x3f + SO_TXREHASH = 0x4a SO_TXTIME = 0x3d SO_TYPE = 0x3 SO_WIFI_STATUS = 0x29 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_amd64.go b/vendor/golang.org/x/sys/unix/zerrors_linux_amd64.go index 58619b758..95b6eeedf 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_amd64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_amd64.go @@ -5,7 +5,7 @@ // +build amd64,linux // Code generated by cmd/cgo -godefs; DO NOT EDIT. -// cgo -godefs -- -Wall -Werror -static -I/tmp/include -m64 /build/unix/_const.go +// cgo -godefs -- -Wall -Werror -static -I/tmp/include -m64 _const.go package unix @@ -327,6 +327,7 @@ const ( SO_RCVBUF = 0x8 SO_RCVBUFFORCE = 0x21 SO_RCVLOWAT = 0x12 + SO_RCVMARK = 0x4b SO_RCVTIMEO = 0x14 SO_RCVTIMEO_NEW = 0x42 SO_RCVTIMEO_OLD = 0x14 @@ -351,6 +352,7 @@ const ( SO_TIMESTAMPNS_NEW = 0x40 SO_TIMESTAMPNS_OLD = 0x23 SO_TIMESTAMP_NEW = 0x3f + SO_TXREHASH = 0x4a SO_TXTIME = 0x3d SO_TYPE = 0x3 SO_WIFI_STATUS = 0x29 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_arm.go b/vendor/golang.org/x/sys/unix/zerrors_linux_arm.go index 3a64ff59d..918cd130e 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_arm.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_arm.go @@ -5,7 +5,7 @@ // +build arm,linux // Code generated by cmd/cgo -godefs; DO NOT EDIT. 
-// cgo -godefs -- -Wall -Werror -static -I/tmp/include /build/unix/_const.go +// cgo -godefs -- -Wall -Werror -static -I/tmp/include _const.go package unix @@ -333,6 +333,7 @@ const ( SO_RCVBUF = 0x8 SO_RCVBUFFORCE = 0x21 SO_RCVLOWAT = 0x12 + SO_RCVMARK = 0x4b SO_RCVTIMEO = 0x14 SO_RCVTIMEO_NEW = 0x42 SO_RCVTIMEO_OLD = 0x14 @@ -357,6 +358,7 @@ const ( SO_TIMESTAMPNS_NEW = 0x40 SO_TIMESTAMPNS_OLD = 0x23 SO_TIMESTAMP_NEW = 0x3f + SO_TXREHASH = 0x4a SO_TXTIME = 0x3d SO_TYPE = 0x3 SO_WIFI_STATUS = 0x29 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_arm64.go b/vendor/golang.org/x/sys/unix/zerrors_linux_arm64.go index abe0b9257..3907dc5a9 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_arm64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_arm64.go @@ -5,7 +5,7 @@ // +build arm64,linux // Code generated by cmd/cgo -godefs; DO NOT EDIT. -// cgo -godefs -- -Wall -Werror -static -I/tmp/include -fsigned-char /build/unix/_const.go +// cgo -godefs -- -Wall -Werror -static -I/tmp/include -fsigned-char _const.go package unix @@ -323,6 +323,7 @@ const ( SO_RCVBUF = 0x8 SO_RCVBUFFORCE = 0x21 SO_RCVLOWAT = 0x12 + SO_RCVMARK = 0x4b SO_RCVTIMEO = 0x14 SO_RCVTIMEO_NEW = 0x42 SO_RCVTIMEO_OLD = 0x14 @@ -347,6 +348,7 @@ const ( SO_TIMESTAMPNS_NEW = 0x40 SO_TIMESTAMPNS_OLD = 0x23 SO_TIMESTAMP_NEW = 0x3f + SO_TXREHASH = 0x4a SO_TXTIME = 0x3d SO_TYPE = 0x3 SO_WIFI_STATUS = 0x29 @@ -511,6 +513,7 @@ const ( WORDSIZE = 0x40 XCASE = 0x4 XTABS = 0x1800 + ZA_MAGIC = 0x54366345 _HIDIOCGRAWNAME = 0x80804804 _HIDIOCGRAWPHYS = 0x80404805 _HIDIOCGRAWUNIQ = 0x80404808 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_loong64.go b/vendor/golang.org/x/sys/unix/zerrors_linux_loong64.go new file mode 100644 index 000000000..03d5c105a --- /dev/null +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_loong64.go @@ -0,0 +1,818 @@ +// mkerrors.sh -Wall -Werror -static -I/tmp/include +// Code generated by the command above; see README.md. DO NOT EDIT. + +//go:build loong64 && linux +// +build loong64,linux + +// Code generated by cmd/cgo -godefs; DO NOT EDIT. 
+// cgo -godefs -- -Wall -Werror -static -I/tmp/include _const.go + +package unix + +import "syscall" + +const ( + B1000000 = 0x1008 + B115200 = 0x1002 + B1152000 = 0x1009 + B1500000 = 0x100a + B2000000 = 0x100b + B230400 = 0x1003 + B2500000 = 0x100c + B3000000 = 0x100d + B3500000 = 0x100e + B4000000 = 0x100f + B460800 = 0x1004 + B500000 = 0x1005 + B57600 = 0x1001 + B576000 = 0x1006 + B921600 = 0x1007 + BLKBSZGET = 0x80081270 + BLKBSZSET = 0x40081271 + BLKFLSBUF = 0x1261 + BLKFRAGET = 0x1265 + BLKFRASET = 0x1264 + BLKGETSIZE = 0x1260 + BLKGETSIZE64 = 0x80081272 + BLKPBSZGET = 0x127b + BLKRAGET = 0x1263 + BLKRASET = 0x1262 + BLKROGET = 0x125e + BLKROSET = 0x125d + BLKRRPART = 0x125f + BLKSECTGET = 0x1267 + BLKSECTSET = 0x1266 + BLKSSZGET = 0x1268 + BOTHER = 0x1000 + BS1 = 0x2000 + BSDLY = 0x2000 + CBAUD = 0x100f + CBAUDEX = 0x1000 + CIBAUD = 0x100f0000 + CLOCAL = 0x800 + CR1 = 0x200 + CR2 = 0x400 + CR3 = 0x600 + CRDLY = 0x600 + CREAD = 0x80 + CS6 = 0x10 + CS7 = 0x20 + CS8 = 0x30 + CSIZE = 0x30 + CSTOPB = 0x40 + ECCGETLAYOUT = 0x81484d11 + ECCGETSTATS = 0x80104d12 + ECHOCTL = 0x200 + ECHOE = 0x10 + ECHOK = 0x20 + ECHOKE = 0x800 + ECHONL = 0x40 + ECHOPRT = 0x400 + EFD_CLOEXEC = 0x80000 + EFD_NONBLOCK = 0x800 + EPOLL_CLOEXEC = 0x80000 + EXTPROC = 0x10000 + FF1 = 0x8000 + FFDLY = 0x8000 + FICLONE = 0x40049409 + FICLONERANGE = 0x4020940d + FLUSHO = 0x1000 + FPU_CTX_MAGIC = 0x46505501 + FS_IOC_ENABLE_VERITY = 0x40806685 + FS_IOC_GETFLAGS = 0x80086601 + FS_IOC_GET_ENCRYPTION_NONCE = 0x8010661b + FS_IOC_GET_ENCRYPTION_POLICY = 0x400c6615 + FS_IOC_GET_ENCRYPTION_PWSALT = 0x40106614 + FS_IOC_SETFLAGS = 0x40086602 + FS_IOC_SET_ENCRYPTION_POLICY = 0x800c6613 + F_GETLK = 0x5 + F_GETLK64 = 0x5 + F_GETOWN = 0x9 + F_RDLCK = 0x0 + F_SETLK = 0x6 + F_SETLK64 = 0x6 + F_SETLKW = 0x7 + F_SETLKW64 = 0x7 + F_SETOWN = 0x8 + F_UNLCK = 0x2 + F_WRLCK = 0x1 + HIDIOCGRAWINFO = 0x80084803 + HIDIOCGRDESC = 0x90044802 + HIDIOCGRDESCSIZE = 0x80044801 + HUPCL = 0x400 + ICANON = 0x2 + IEXTEN = 0x8000 + IN_CLOEXEC = 0x80000 + IN_NONBLOCK = 0x800 + IOCTL_VM_SOCKETS_GET_LOCAL_CID = 0x7b9 + ISIG = 0x1 + IUCLC = 0x200 + IXOFF = 0x1000 + IXON = 0x400 + MAP_ANON = 0x20 + MAP_ANONYMOUS = 0x20 + MAP_DENYWRITE = 0x800 + MAP_EXECUTABLE = 0x1000 + MAP_GROWSDOWN = 0x100 + MAP_HUGETLB = 0x40000 + MAP_LOCKED = 0x2000 + MAP_NONBLOCK = 0x10000 + MAP_NORESERVE = 0x4000 + MAP_POPULATE = 0x8000 + MAP_STACK = 0x20000 + MAP_SYNC = 0x80000 + MCL_CURRENT = 0x1 + MCL_FUTURE = 0x2 + MCL_ONFAULT = 0x4 + MEMERASE = 0x40084d02 + MEMERASE64 = 0x40104d14 + MEMGETBADBLOCK = 0x40084d0b + MEMGETINFO = 0x80204d01 + MEMGETOOBSEL = 0x80c84d0a + MEMGETREGIONCOUNT = 0x80044d07 + MEMISLOCKED = 0x80084d17 + MEMLOCK = 0x40084d05 + MEMREADOOB = 0xc0104d04 + MEMSETBADBLOCK = 0x40084d0c + MEMUNLOCK = 0x40084d06 + MEMWRITEOOB = 0xc0104d03 + MTDFILEMODE = 0x4d13 + NFDBITS = 0x40 + NLDLY = 0x100 + NOFLSH = 0x80 + NS_GET_NSTYPE = 0xb703 + NS_GET_OWNER_UID = 0xb704 + NS_GET_PARENT = 0xb702 + NS_GET_USERNS = 0xb701 + OLCUC = 0x2 + ONLCR = 0x4 + OTPERASE = 0x400c4d19 + OTPGETREGIONCOUNT = 0x40044d0e + OTPGETREGIONINFO = 0x400c4d0f + OTPLOCK = 0x800c4d10 + OTPSELECT = 0x80044d0d + O_APPEND = 0x400 + O_ASYNC = 0x2000 + O_CLOEXEC = 0x80000 + O_CREAT = 0x40 + O_DIRECT = 0x4000 + O_DIRECTORY = 0x10000 + O_DSYNC = 0x1000 + O_EXCL = 0x80 + O_FSYNC = 0x101000 + O_LARGEFILE = 0x0 + O_NDELAY = 0x800 + O_NOATIME = 0x40000 + O_NOCTTY = 0x100 + O_NOFOLLOW = 0x20000 + O_NONBLOCK = 0x800 + O_PATH = 0x200000 + O_RSYNC = 0x101000 + O_SYNC = 0x101000 + O_TMPFILE = 0x410000 + O_TRUNC = 0x200 + 
PARENB = 0x100 + PARODD = 0x200 + PENDIN = 0x4000 + PERF_EVENT_IOC_DISABLE = 0x2401 + PERF_EVENT_IOC_ENABLE = 0x2400 + PERF_EVENT_IOC_ID = 0x80082407 + PERF_EVENT_IOC_MODIFY_ATTRIBUTES = 0x4008240b + PERF_EVENT_IOC_PAUSE_OUTPUT = 0x40042409 + PERF_EVENT_IOC_PERIOD = 0x40082404 + PERF_EVENT_IOC_QUERY_BPF = 0xc008240a + PERF_EVENT_IOC_REFRESH = 0x2402 + PERF_EVENT_IOC_RESET = 0x2403 + PERF_EVENT_IOC_SET_BPF = 0x40042408 + PERF_EVENT_IOC_SET_FILTER = 0x40082406 + PERF_EVENT_IOC_SET_OUTPUT = 0x2405 + PPPIOCATTACH = 0x4004743d + PPPIOCATTCHAN = 0x40047438 + PPPIOCBRIDGECHAN = 0x40047435 + PPPIOCCONNECT = 0x4004743a + PPPIOCDETACH = 0x4004743c + PPPIOCDISCONN = 0x7439 + PPPIOCGASYNCMAP = 0x80047458 + PPPIOCGCHAN = 0x80047437 + PPPIOCGDEBUG = 0x80047441 + PPPIOCGFLAGS = 0x8004745a + PPPIOCGIDLE = 0x8010743f + PPPIOCGIDLE32 = 0x8008743f + PPPIOCGIDLE64 = 0x8010743f + PPPIOCGL2TPSTATS = 0x80487436 + PPPIOCGMRU = 0x80047453 + PPPIOCGRASYNCMAP = 0x80047455 + PPPIOCGUNIT = 0x80047456 + PPPIOCGXASYNCMAP = 0x80207450 + PPPIOCSACTIVE = 0x40107446 + PPPIOCSASYNCMAP = 0x40047457 + PPPIOCSCOMPRESS = 0x4010744d + PPPIOCSDEBUG = 0x40047440 + PPPIOCSFLAGS = 0x40047459 + PPPIOCSMAXCID = 0x40047451 + PPPIOCSMRRU = 0x4004743b + PPPIOCSMRU = 0x40047452 + PPPIOCSNPMODE = 0x4008744b + PPPIOCSPASS = 0x40107447 + PPPIOCSRASYNCMAP = 0x40047454 + PPPIOCSXASYNCMAP = 0x4020744f + PPPIOCUNBRIDGECHAN = 0x7434 + PPPIOCXFERUNIT = 0x744e + PR_SET_PTRACER_ANY = 0xffffffffffffffff + PTRACE_SYSEMU = 0x1f + PTRACE_SYSEMU_SINGLESTEP = 0x20 + RLIMIT_AS = 0x9 + RLIMIT_MEMLOCK = 0x8 + RLIMIT_NOFILE = 0x7 + RLIMIT_NPROC = 0x6 + RLIMIT_RSS = 0x5 + RNDADDENTROPY = 0x40085203 + RNDADDTOENTCNT = 0x40045201 + RNDCLEARPOOL = 0x5206 + RNDGETENTCNT = 0x80045200 + RNDGETPOOL = 0x80085202 + RNDRESEEDCRNG = 0x5207 + RNDZAPENTCNT = 0x5204 + RTC_AIE_OFF = 0x7002 + RTC_AIE_ON = 0x7001 + RTC_ALM_READ = 0x80247008 + RTC_ALM_SET = 0x40247007 + RTC_EPOCH_READ = 0x8008700d + RTC_EPOCH_SET = 0x4008700e + RTC_IRQP_READ = 0x8008700b + RTC_IRQP_SET = 0x4008700c + RTC_PARAM_GET = 0x40187013 + RTC_PARAM_SET = 0x40187014 + RTC_PIE_OFF = 0x7006 + RTC_PIE_ON = 0x7005 + RTC_PLL_GET = 0x80207011 + RTC_PLL_SET = 0x40207012 + RTC_RD_TIME = 0x80247009 + RTC_SET_TIME = 0x4024700a + RTC_UIE_OFF = 0x7004 + RTC_UIE_ON = 0x7003 + RTC_VL_CLR = 0x7014 + RTC_VL_READ = 0x80047013 + RTC_WIE_OFF = 0x7010 + RTC_WIE_ON = 0x700f + RTC_WKALM_RD = 0x80287010 + RTC_WKALM_SET = 0x4028700f + SCM_TIMESTAMPING = 0x25 + SCM_TIMESTAMPING_OPT_STATS = 0x36 + SCM_TIMESTAMPING_PKTINFO = 0x3a + SCM_TIMESTAMPNS = 0x23 + SCM_TXTIME = 0x3d + SCM_WIFI_STATUS = 0x29 + SFD_CLOEXEC = 0x80000 + SFD_NONBLOCK = 0x800 + SIOCATMARK = 0x8905 + SIOCGPGRP = 0x8904 + SIOCGSTAMPNS_NEW = 0x80108907 + SIOCGSTAMP_NEW = 0x80108906 + SIOCINQ = 0x541b + SIOCOUTQ = 0x5411 + SIOCSPGRP = 0x8902 + SOCK_CLOEXEC = 0x80000 + SOCK_DGRAM = 0x2 + SOCK_NONBLOCK = 0x800 + SOCK_STREAM = 0x1 + SOL_SOCKET = 0x1 + SO_ACCEPTCONN = 0x1e + SO_ATTACH_BPF = 0x32 + SO_ATTACH_REUSEPORT_CBPF = 0x33 + SO_ATTACH_REUSEPORT_EBPF = 0x34 + SO_BINDTODEVICE = 0x19 + SO_BINDTOIFINDEX = 0x3e + SO_BPF_EXTENSIONS = 0x30 + SO_BROADCAST = 0x6 + SO_BSDCOMPAT = 0xe + SO_BUF_LOCK = 0x48 + SO_BUSY_POLL = 0x2e + SO_BUSY_POLL_BUDGET = 0x46 + SO_CNX_ADVICE = 0x35 + SO_COOKIE = 0x39 + SO_DETACH_REUSEPORT_BPF = 0x44 + SO_DOMAIN = 0x27 + SO_DONTROUTE = 0x5 + SO_ERROR = 0x4 + SO_INCOMING_CPU = 0x31 + SO_INCOMING_NAPI_ID = 0x38 + SO_KEEPALIVE = 0x9 + SO_LINGER = 0xd + SO_LOCK_FILTER = 0x2c + SO_MARK = 0x24 + SO_MAX_PACING_RATE = 0x2f + SO_MEMINFO = 0x37 + 
SO_NETNS_COOKIE = 0x47 + SO_NOFCS = 0x2b + SO_OOBINLINE = 0xa + SO_PASSCRED = 0x10 + SO_PASSSEC = 0x22 + SO_PEEK_OFF = 0x2a + SO_PEERCRED = 0x11 + SO_PEERGROUPS = 0x3b + SO_PEERSEC = 0x1f + SO_PREFER_BUSY_POLL = 0x45 + SO_PROTOCOL = 0x26 + SO_RCVBUF = 0x8 + SO_RCVBUFFORCE = 0x21 + SO_RCVLOWAT = 0x12 + SO_RCVMARK = 0x4b + SO_RCVTIMEO = 0x14 + SO_RCVTIMEO_NEW = 0x42 + SO_RCVTIMEO_OLD = 0x14 + SO_RESERVE_MEM = 0x49 + SO_REUSEADDR = 0x2 + SO_REUSEPORT = 0xf + SO_RXQ_OVFL = 0x28 + SO_SECURITY_AUTHENTICATION = 0x16 + SO_SECURITY_ENCRYPTION_NETWORK = 0x18 + SO_SECURITY_ENCRYPTION_TRANSPORT = 0x17 + SO_SELECT_ERR_QUEUE = 0x2d + SO_SNDBUF = 0x7 + SO_SNDBUFFORCE = 0x20 + SO_SNDLOWAT = 0x13 + SO_SNDTIMEO = 0x15 + SO_SNDTIMEO_NEW = 0x43 + SO_SNDTIMEO_OLD = 0x15 + SO_TIMESTAMPING = 0x25 + SO_TIMESTAMPING_NEW = 0x41 + SO_TIMESTAMPING_OLD = 0x25 + SO_TIMESTAMPNS = 0x23 + SO_TIMESTAMPNS_NEW = 0x40 + SO_TIMESTAMPNS_OLD = 0x23 + SO_TIMESTAMP_NEW = 0x3f + SO_TXREHASH = 0x4a + SO_TXTIME = 0x3d + SO_TYPE = 0x3 + SO_WIFI_STATUS = 0x29 + SO_ZEROCOPY = 0x3c + TAB1 = 0x800 + TAB2 = 0x1000 + TAB3 = 0x1800 + TABDLY = 0x1800 + TCFLSH = 0x540b + TCGETA = 0x5405 + TCGETS = 0x5401 + TCGETS2 = 0x802c542a + TCGETX = 0x5432 + TCSAFLUSH = 0x2 + TCSBRK = 0x5409 + TCSBRKP = 0x5425 + TCSETA = 0x5406 + TCSETAF = 0x5408 + TCSETAW = 0x5407 + TCSETS = 0x5402 + TCSETS2 = 0x402c542b + TCSETSF = 0x5404 + TCSETSF2 = 0x402c542d + TCSETSW = 0x5403 + TCSETSW2 = 0x402c542c + TCSETX = 0x5433 + TCSETXF = 0x5434 + TCSETXW = 0x5435 + TCXONC = 0x540a + TFD_CLOEXEC = 0x80000 + TFD_NONBLOCK = 0x800 + TIOCCBRK = 0x5428 + TIOCCONS = 0x541d + TIOCEXCL = 0x540c + TIOCGDEV = 0x80045432 + TIOCGETD = 0x5424 + TIOCGEXCL = 0x80045440 + TIOCGICOUNT = 0x545d + TIOCGISO7816 = 0x80285442 + TIOCGLCKTRMIOS = 0x5456 + TIOCGPGRP = 0x540f + TIOCGPKT = 0x80045438 + TIOCGPTLCK = 0x80045439 + TIOCGPTN = 0x80045430 + TIOCGPTPEER = 0x5441 + TIOCGRS485 = 0x542e + TIOCGSERIAL = 0x541e + TIOCGSID = 0x5429 + TIOCGSOFTCAR = 0x5419 + TIOCGWINSZ = 0x5413 + TIOCINQ = 0x541b + TIOCLINUX = 0x541c + TIOCMBIC = 0x5417 + TIOCMBIS = 0x5416 + TIOCMGET = 0x5415 + TIOCMIWAIT = 0x545c + TIOCMSET = 0x5418 + TIOCM_CAR = 0x40 + TIOCM_CD = 0x40 + TIOCM_CTS = 0x20 + TIOCM_DSR = 0x100 + TIOCM_RI = 0x80 + TIOCM_RNG = 0x80 + TIOCM_SR = 0x10 + TIOCM_ST = 0x8 + TIOCNOTTY = 0x5422 + TIOCNXCL = 0x540d + TIOCOUTQ = 0x5411 + TIOCPKT = 0x5420 + TIOCSBRK = 0x5427 + TIOCSCTTY = 0x540e + TIOCSERCONFIG = 0x5453 + TIOCSERGETLSR = 0x5459 + TIOCSERGETMULTI = 0x545a + TIOCSERGSTRUCT = 0x5458 + TIOCSERGWILD = 0x5454 + TIOCSERSETMULTI = 0x545b + TIOCSERSWILD = 0x5455 + TIOCSER_TEMT = 0x1 + TIOCSETD = 0x5423 + TIOCSIG = 0x40045436 + TIOCSISO7816 = 0xc0285443 + TIOCSLCKTRMIOS = 0x5457 + TIOCSPGRP = 0x5410 + TIOCSPTLCK = 0x40045431 + TIOCSRS485 = 0x542f + TIOCSSERIAL = 0x541f + TIOCSSOFTCAR = 0x541a + TIOCSTI = 0x5412 + TIOCSWINSZ = 0x5414 + TIOCVHANGUP = 0x5437 + TOSTOP = 0x100 + TUNATTACHFILTER = 0x401054d5 + TUNDETACHFILTER = 0x401054d6 + TUNGETDEVNETNS = 0x54e3 + TUNGETFEATURES = 0x800454cf + TUNGETFILTER = 0x801054db + TUNGETIFF = 0x800454d2 + TUNGETSNDBUF = 0x800454d3 + TUNGETVNETBE = 0x800454df + TUNGETVNETHDRSZ = 0x800454d7 + TUNGETVNETLE = 0x800454dd + TUNSETCARRIER = 0x400454e2 + TUNSETDEBUG = 0x400454c9 + TUNSETFILTEREBPF = 0x800454e1 + TUNSETGROUP = 0x400454ce + TUNSETIFF = 0x400454ca + TUNSETIFINDEX = 0x400454da + TUNSETLINK = 0x400454cd + TUNSETNOCSUM = 0x400454c8 + TUNSETOFFLOAD = 0x400454d0 + TUNSETOWNER = 0x400454cc + TUNSETPERSIST = 0x400454cb + TUNSETQUEUE = 0x400454d9 + TUNSETSNDBUF = 
0x400454d4 + TUNSETSTEERINGEBPF = 0x800454e0 + TUNSETTXFILTER = 0x400454d1 + TUNSETVNETBE = 0x400454de + TUNSETVNETHDRSZ = 0x400454d8 + TUNSETVNETLE = 0x400454dc + UBI_IOCATT = 0x40186f40 + UBI_IOCDET = 0x40046f41 + UBI_IOCEBCH = 0x40044f02 + UBI_IOCEBER = 0x40044f01 + UBI_IOCEBISMAP = 0x80044f05 + UBI_IOCEBMAP = 0x40084f03 + UBI_IOCEBUNMAP = 0x40044f04 + UBI_IOCMKVOL = 0x40986f00 + UBI_IOCRMVOL = 0x40046f01 + UBI_IOCRNVOL = 0x51106f03 + UBI_IOCRPEB = 0x40046f04 + UBI_IOCRSVOL = 0x400c6f02 + UBI_IOCSETVOLPROP = 0x40104f06 + UBI_IOCSPEB = 0x40046f05 + UBI_IOCVOLCRBLK = 0x40804f07 + UBI_IOCVOLRMBLK = 0x4f08 + UBI_IOCVOLUP = 0x40084f00 + VDISCARD = 0xd + VEOF = 0x4 + VEOL = 0xb + VEOL2 = 0x10 + VMIN = 0x6 + VREPRINT = 0xc + VSTART = 0x8 + VSTOP = 0x9 + VSUSP = 0xa + VSWTC = 0x7 + VT1 = 0x4000 + VTDLY = 0x4000 + VTIME = 0x5 + VWERASE = 0xe + WDIOC_GETBOOTSTATUS = 0x80045702 + WDIOC_GETPRETIMEOUT = 0x80045709 + WDIOC_GETSTATUS = 0x80045701 + WDIOC_GETSUPPORT = 0x80285700 + WDIOC_GETTEMP = 0x80045703 + WDIOC_GETTIMELEFT = 0x8004570a + WDIOC_GETTIMEOUT = 0x80045707 + WDIOC_KEEPALIVE = 0x80045705 + WDIOC_SETOPTIONS = 0x80045704 + WORDSIZE = 0x40 + XCASE = 0x4 + XTABS = 0x1800 + _HIDIOCGRAWNAME = 0x80804804 + _HIDIOCGRAWPHYS = 0x80404805 + _HIDIOCGRAWUNIQ = 0x80404808 +) + +// Errors +const ( + EADDRINUSE = syscall.Errno(0x62) + EADDRNOTAVAIL = syscall.Errno(0x63) + EADV = syscall.Errno(0x44) + EAFNOSUPPORT = syscall.Errno(0x61) + EALREADY = syscall.Errno(0x72) + EBADE = syscall.Errno(0x34) + EBADFD = syscall.Errno(0x4d) + EBADMSG = syscall.Errno(0x4a) + EBADR = syscall.Errno(0x35) + EBADRQC = syscall.Errno(0x38) + EBADSLT = syscall.Errno(0x39) + EBFONT = syscall.Errno(0x3b) + ECANCELED = syscall.Errno(0x7d) + ECHRNG = syscall.Errno(0x2c) + ECOMM = syscall.Errno(0x46) + ECONNABORTED = syscall.Errno(0x67) + ECONNREFUSED = syscall.Errno(0x6f) + ECONNRESET = syscall.Errno(0x68) + EDEADLK = syscall.Errno(0x23) + EDEADLOCK = syscall.Errno(0x23) + EDESTADDRREQ = syscall.Errno(0x59) + EDOTDOT = syscall.Errno(0x49) + EDQUOT = syscall.Errno(0x7a) + EHOSTDOWN = syscall.Errno(0x70) + EHOSTUNREACH = syscall.Errno(0x71) + EHWPOISON = syscall.Errno(0x85) + EIDRM = syscall.Errno(0x2b) + EILSEQ = syscall.Errno(0x54) + EINPROGRESS = syscall.Errno(0x73) + EISCONN = syscall.Errno(0x6a) + EISNAM = syscall.Errno(0x78) + EKEYEXPIRED = syscall.Errno(0x7f) + EKEYREJECTED = syscall.Errno(0x81) + EKEYREVOKED = syscall.Errno(0x80) + EL2HLT = syscall.Errno(0x33) + EL2NSYNC = syscall.Errno(0x2d) + EL3HLT = syscall.Errno(0x2e) + EL3RST = syscall.Errno(0x2f) + ELIBACC = syscall.Errno(0x4f) + ELIBBAD = syscall.Errno(0x50) + ELIBEXEC = syscall.Errno(0x53) + ELIBMAX = syscall.Errno(0x52) + ELIBSCN = syscall.Errno(0x51) + ELNRNG = syscall.Errno(0x30) + ELOOP = syscall.Errno(0x28) + EMEDIUMTYPE = syscall.Errno(0x7c) + EMSGSIZE = syscall.Errno(0x5a) + EMULTIHOP = syscall.Errno(0x48) + ENAMETOOLONG = syscall.Errno(0x24) + ENAVAIL = syscall.Errno(0x77) + ENETDOWN = syscall.Errno(0x64) + ENETRESET = syscall.Errno(0x66) + ENETUNREACH = syscall.Errno(0x65) + ENOANO = syscall.Errno(0x37) + ENOBUFS = syscall.Errno(0x69) + ENOCSI = syscall.Errno(0x32) + ENODATA = syscall.Errno(0x3d) + ENOKEY = syscall.Errno(0x7e) + ENOLCK = syscall.Errno(0x25) + ENOLINK = syscall.Errno(0x43) + ENOMEDIUM = syscall.Errno(0x7b) + ENOMSG = syscall.Errno(0x2a) + ENONET = syscall.Errno(0x40) + ENOPKG = syscall.Errno(0x41) + ENOPROTOOPT = syscall.Errno(0x5c) + ENOSR = syscall.Errno(0x3f) + ENOSTR = syscall.Errno(0x3c) + ENOSYS = syscall.Errno(0x26) + ENOTCONN = 
syscall.Errno(0x6b) + ENOTEMPTY = syscall.Errno(0x27) + ENOTNAM = syscall.Errno(0x76) + ENOTRECOVERABLE = syscall.Errno(0x83) + ENOTSOCK = syscall.Errno(0x58) + ENOTSUP = syscall.Errno(0x5f) + ENOTUNIQ = syscall.Errno(0x4c) + EOPNOTSUPP = syscall.Errno(0x5f) + EOVERFLOW = syscall.Errno(0x4b) + EOWNERDEAD = syscall.Errno(0x82) + EPFNOSUPPORT = syscall.Errno(0x60) + EPROTO = syscall.Errno(0x47) + EPROTONOSUPPORT = syscall.Errno(0x5d) + EPROTOTYPE = syscall.Errno(0x5b) + EREMCHG = syscall.Errno(0x4e) + EREMOTE = syscall.Errno(0x42) + EREMOTEIO = syscall.Errno(0x79) + ERESTART = syscall.Errno(0x55) + ERFKILL = syscall.Errno(0x84) + ESHUTDOWN = syscall.Errno(0x6c) + ESOCKTNOSUPPORT = syscall.Errno(0x5e) + ESRMNT = syscall.Errno(0x45) + ESTALE = syscall.Errno(0x74) + ESTRPIPE = syscall.Errno(0x56) + ETIME = syscall.Errno(0x3e) + ETIMEDOUT = syscall.Errno(0x6e) + ETOOMANYREFS = syscall.Errno(0x6d) + EUCLEAN = syscall.Errno(0x75) + EUNATCH = syscall.Errno(0x31) + EUSERS = syscall.Errno(0x57) + EXFULL = syscall.Errno(0x36) +) + +// Signals +const ( + SIGBUS = syscall.Signal(0x7) + SIGCHLD = syscall.Signal(0x11) + SIGCLD = syscall.Signal(0x11) + SIGCONT = syscall.Signal(0x12) + SIGIO = syscall.Signal(0x1d) + SIGPOLL = syscall.Signal(0x1d) + SIGPROF = syscall.Signal(0x1b) + SIGPWR = syscall.Signal(0x1e) + SIGSTKFLT = syscall.Signal(0x10) + SIGSTOP = syscall.Signal(0x13) + SIGSYS = syscall.Signal(0x1f) + SIGTSTP = syscall.Signal(0x14) + SIGTTIN = syscall.Signal(0x15) + SIGTTOU = syscall.Signal(0x16) + SIGURG = syscall.Signal(0x17) + SIGUSR1 = syscall.Signal(0xa) + SIGUSR2 = syscall.Signal(0xc) + SIGVTALRM = syscall.Signal(0x1a) + SIGWINCH = syscall.Signal(0x1c) + SIGXCPU = syscall.Signal(0x18) + SIGXFSZ = syscall.Signal(0x19) +) + +// Error table +var errorList = [...]struct { + num syscall.Errno + name string + desc string +}{ + {1, "EPERM", "operation not permitted"}, + {2, "ENOENT", "no such file or directory"}, + {3, "ESRCH", "no such process"}, + {4, "EINTR", "interrupted system call"}, + {5, "EIO", "input/output error"}, + {6, "ENXIO", "no such device or address"}, + {7, "E2BIG", "argument list too long"}, + {8, "ENOEXEC", "exec format error"}, + {9, "EBADF", "bad file descriptor"}, + {10, "ECHILD", "no child processes"}, + {11, "EAGAIN", "resource temporarily unavailable"}, + {12, "ENOMEM", "cannot allocate memory"}, + {13, "EACCES", "permission denied"}, + {14, "EFAULT", "bad address"}, + {15, "ENOTBLK", "block device required"}, + {16, "EBUSY", "device or resource busy"}, + {17, "EEXIST", "file exists"}, + {18, "EXDEV", "invalid cross-device link"}, + {19, "ENODEV", "no such device"}, + {20, "ENOTDIR", "not a directory"}, + {21, "EISDIR", "is a directory"}, + {22, "EINVAL", "invalid argument"}, + {23, "ENFILE", "too many open files in system"}, + {24, "EMFILE", "too many open files"}, + {25, "ENOTTY", "inappropriate ioctl for device"}, + {26, "ETXTBSY", "text file busy"}, + {27, "EFBIG", "file too large"}, + {28, "ENOSPC", "no space left on device"}, + {29, "ESPIPE", "illegal seek"}, + {30, "EROFS", "read-only file system"}, + {31, "EMLINK", "too many links"}, + {32, "EPIPE", "broken pipe"}, + {33, "EDOM", "numerical argument out of domain"}, + {34, "ERANGE", "numerical result out of range"}, + {35, "EDEADLK", "resource deadlock avoided"}, + {36, "ENAMETOOLONG", "file name too long"}, + {37, "ENOLCK", "no locks available"}, + {38, "ENOSYS", "function not implemented"}, + {39, "ENOTEMPTY", "directory not empty"}, + {40, "ELOOP", "too many levels of symbolic links"}, + {42, "ENOMSG", "no 
message of desired type"}, + {43, "EIDRM", "identifier removed"}, + {44, "ECHRNG", "channel number out of range"}, + {45, "EL2NSYNC", "level 2 not synchronized"}, + {46, "EL3HLT", "level 3 halted"}, + {47, "EL3RST", "level 3 reset"}, + {48, "ELNRNG", "link number out of range"}, + {49, "EUNATCH", "protocol driver not attached"}, + {50, "ENOCSI", "no CSI structure available"}, + {51, "EL2HLT", "level 2 halted"}, + {52, "EBADE", "invalid exchange"}, + {53, "EBADR", "invalid request descriptor"}, + {54, "EXFULL", "exchange full"}, + {55, "ENOANO", "no anode"}, + {56, "EBADRQC", "invalid request code"}, + {57, "EBADSLT", "invalid slot"}, + {59, "EBFONT", "bad font file format"}, + {60, "ENOSTR", "device not a stream"}, + {61, "ENODATA", "no data available"}, + {62, "ETIME", "timer expired"}, + {63, "ENOSR", "out of streams resources"}, + {64, "ENONET", "machine is not on the network"}, + {65, "ENOPKG", "package not installed"}, + {66, "EREMOTE", "object is remote"}, + {67, "ENOLINK", "link has been severed"}, + {68, "EADV", "advertise error"}, + {69, "ESRMNT", "srmount error"}, + {70, "ECOMM", "communication error on send"}, + {71, "EPROTO", "protocol error"}, + {72, "EMULTIHOP", "multihop attempted"}, + {73, "EDOTDOT", "RFS specific error"}, + {74, "EBADMSG", "bad message"}, + {75, "EOVERFLOW", "value too large for defined data type"}, + {76, "ENOTUNIQ", "name not unique on network"}, + {77, "EBADFD", "file descriptor in bad state"}, + {78, "EREMCHG", "remote address changed"}, + {79, "ELIBACC", "can not access a needed shared library"}, + {80, "ELIBBAD", "accessing a corrupted shared library"}, + {81, "ELIBSCN", ".lib section in a.out corrupted"}, + {82, "ELIBMAX", "attempting to link in too many shared libraries"}, + {83, "ELIBEXEC", "cannot exec a shared library directly"}, + {84, "EILSEQ", "invalid or incomplete multibyte or wide character"}, + {85, "ERESTART", "interrupted system call should be restarted"}, + {86, "ESTRPIPE", "streams pipe error"}, + {87, "EUSERS", "too many users"}, + {88, "ENOTSOCK", "socket operation on non-socket"}, + {89, "EDESTADDRREQ", "destination address required"}, + {90, "EMSGSIZE", "message too long"}, + {91, "EPROTOTYPE", "protocol wrong type for socket"}, + {92, "ENOPROTOOPT", "protocol not available"}, + {93, "EPROTONOSUPPORT", "protocol not supported"}, + {94, "ESOCKTNOSUPPORT", "socket type not supported"}, + {95, "ENOTSUP", "operation not supported"}, + {96, "EPFNOSUPPORT", "protocol family not supported"}, + {97, "EAFNOSUPPORT", "address family not supported by protocol"}, + {98, "EADDRINUSE", "address already in use"}, + {99, "EADDRNOTAVAIL", "cannot assign requested address"}, + {100, "ENETDOWN", "network is down"}, + {101, "ENETUNREACH", "network is unreachable"}, + {102, "ENETRESET", "network dropped connection on reset"}, + {103, "ECONNABORTED", "software caused connection abort"}, + {104, "ECONNRESET", "connection reset by peer"}, + {105, "ENOBUFS", "no buffer space available"}, + {106, "EISCONN", "transport endpoint is already connected"}, + {107, "ENOTCONN", "transport endpoint is not connected"}, + {108, "ESHUTDOWN", "cannot send after transport endpoint shutdown"}, + {109, "ETOOMANYREFS", "too many references: cannot splice"}, + {110, "ETIMEDOUT", "connection timed out"}, + {111, "ECONNREFUSED", "connection refused"}, + {112, "EHOSTDOWN", "host is down"}, + {113, "EHOSTUNREACH", "no route to host"}, + {114, "EALREADY", "operation already in progress"}, + {115, "EINPROGRESS", "operation now in progress"}, + {116, "ESTALE", "stale file 
handle"}, + {117, "EUCLEAN", "structure needs cleaning"}, + {118, "ENOTNAM", "not a XENIX named type file"}, + {119, "ENAVAIL", "no XENIX semaphores available"}, + {120, "EISNAM", "is a named type file"}, + {121, "EREMOTEIO", "remote I/O error"}, + {122, "EDQUOT", "disk quota exceeded"}, + {123, "ENOMEDIUM", "no medium found"}, + {124, "EMEDIUMTYPE", "wrong medium type"}, + {125, "ECANCELED", "operation canceled"}, + {126, "ENOKEY", "required key not available"}, + {127, "EKEYEXPIRED", "key has expired"}, + {128, "EKEYREVOKED", "key has been revoked"}, + {129, "EKEYREJECTED", "key was rejected by service"}, + {130, "EOWNERDEAD", "owner died"}, + {131, "ENOTRECOVERABLE", "state not recoverable"}, + {132, "ERFKILL", "operation not possible due to RF-kill"}, + {133, "EHWPOISON", "memory page has hardware error"}, +} + +// Signal table +var signalList = [...]struct { + num syscall.Signal + name string + desc string +}{ + {1, "SIGHUP", "hangup"}, + {2, "SIGINT", "interrupt"}, + {3, "SIGQUIT", "quit"}, + {4, "SIGILL", "illegal instruction"}, + {5, "SIGTRAP", "trace/breakpoint trap"}, + {6, "SIGABRT", "aborted"}, + {7, "SIGBUS", "bus error"}, + {8, "SIGFPE", "floating point exception"}, + {9, "SIGKILL", "killed"}, + {10, "SIGUSR1", "user defined signal 1"}, + {11, "SIGSEGV", "segmentation fault"}, + {12, "SIGUSR2", "user defined signal 2"}, + {13, "SIGPIPE", "broken pipe"}, + {14, "SIGALRM", "alarm clock"}, + {15, "SIGTERM", "terminated"}, + {16, "SIGSTKFLT", "stack fault"}, + {17, "SIGCHLD", "child exited"}, + {18, "SIGCONT", "continued"}, + {19, "SIGSTOP", "stopped (signal)"}, + {20, "SIGTSTP", "stopped"}, + {21, "SIGTTIN", "stopped (tty input)"}, + {22, "SIGTTOU", "stopped (tty output)"}, + {23, "SIGURG", "urgent I/O condition"}, + {24, "SIGXCPU", "CPU time limit exceeded"}, + {25, "SIGXFSZ", "file size limit exceeded"}, + {26, "SIGVTALRM", "virtual timer expired"}, + {27, "SIGPROF", "profiling timer expired"}, + {28, "SIGWINCH", "window changed"}, + {29, "SIGIO", "I/O possible"}, + {30, "SIGPWR", "power failure"}, + {31, "SIGSYS", "bad system call"}, +} diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_mips.go b/vendor/golang.org/x/sys/unix/zerrors_linux_mips.go index 14d7a8439..bd794e010 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_mips.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_mips.go @@ -5,7 +5,7 @@ // +build mips,linux // Code generated by cmd/cgo -godefs; DO NOT EDIT. -// cgo -godefs -- -Wall -Werror -static -I/tmp/include /build/unix/_const.go +// cgo -godefs -- -Wall -Werror -static -I/tmp/include _const.go package unix @@ -326,6 +326,7 @@ const ( SO_RCVBUF = 0x1002 SO_RCVBUFFORCE = 0x21 SO_RCVLOWAT = 0x1004 + SO_RCVMARK = 0x4b SO_RCVTIMEO = 0x1006 SO_RCVTIMEO_NEW = 0x42 SO_RCVTIMEO_OLD = 0x1006 @@ -351,6 +352,7 @@ const ( SO_TIMESTAMPNS_NEW = 0x40 SO_TIMESTAMPNS_OLD = 0x23 SO_TIMESTAMP_NEW = 0x3f + SO_TXREHASH = 0x4a SO_TXTIME = 0x3d SO_TYPE = 0x1008 SO_WIFI_STATUS = 0x29 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_mips64.go b/vendor/golang.org/x/sys/unix/zerrors_linux_mips64.go index 99e7c4ac0..6c741b054 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_mips64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_mips64.go @@ -5,7 +5,7 @@ // +build mips64,linux // Code generated by cmd/cgo -godefs; DO NOT EDIT. 
-// cgo -godefs -- -Wall -Werror -static -I/tmp/include /build/unix/_const.go +// cgo -godefs -- -Wall -Werror -static -I/tmp/include _const.go package unix @@ -326,6 +326,7 @@ const ( SO_RCVBUF = 0x1002 SO_RCVBUFFORCE = 0x21 SO_RCVLOWAT = 0x1004 + SO_RCVMARK = 0x4b SO_RCVTIMEO = 0x1006 SO_RCVTIMEO_NEW = 0x42 SO_RCVTIMEO_OLD = 0x1006 @@ -351,6 +352,7 @@ const ( SO_TIMESTAMPNS_NEW = 0x40 SO_TIMESTAMPNS_OLD = 0x23 SO_TIMESTAMP_NEW = 0x3f + SO_TXREHASH = 0x4a SO_TXTIME = 0x3d SO_TYPE = 0x1008 SO_WIFI_STATUS = 0x29 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_mips64le.go b/vendor/golang.org/x/sys/unix/zerrors_linux_mips64le.go index 496364c33..807b8cd2a 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_mips64le.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_mips64le.go @@ -5,7 +5,7 @@ // +build mips64le,linux // Code generated by cmd/cgo -godefs; DO NOT EDIT. -// cgo -godefs -- -Wall -Werror -static -I/tmp/include /build/unix/_const.go +// cgo -godefs -- -Wall -Werror -static -I/tmp/include _const.go package unix @@ -326,6 +326,7 @@ const ( SO_RCVBUF = 0x1002 SO_RCVBUFFORCE = 0x21 SO_RCVLOWAT = 0x1004 + SO_RCVMARK = 0x4b SO_RCVTIMEO = 0x1006 SO_RCVTIMEO_NEW = 0x42 SO_RCVTIMEO_OLD = 0x1006 @@ -351,6 +352,7 @@ const ( SO_TIMESTAMPNS_NEW = 0x40 SO_TIMESTAMPNS_OLD = 0x23 SO_TIMESTAMP_NEW = 0x3f + SO_TXREHASH = 0x4a SO_TXTIME = 0x3d SO_TYPE = 0x1008 SO_WIFI_STATUS = 0x29 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_mipsle.go b/vendor/golang.org/x/sys/unix/zerrors_linux_mipsle.go index 3e4083085..a39e4f5c2 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_mipsle.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_mipsle.go @@ -5,7 +5,7 @@ // +build mipsle,linux // Code generated by cmd/cgo -godefs; DO NOT EDIT. -// cgo -godefs -- -Wall -Werror -static -I/tmp/include /build/unix/_const.go +// cgo -godefs -- -Wall -Werror -static -I/tmp/include _const.go package unix @@ -326,6 +326,7 @@ const ( SO_RCVBUF = 0x1002 SO_RCVBUFFORCE = 0x21 SO_RCVLOWAT = 0x1004 + SO_RCVMARK = 0x4b SO_RCVTIMEO = 0x1006 SO_RCVTIMEO_NEW = 0x42 SO_RCVTIMEO_OLD = 0x1006 @@ -351,6 +352,7 @@ const ( SO_TIMESTAMPNS_NEW = 0x40 SO_TIMESTAMPNS_OLD = 0x23 SO_TIMESTAMP_NEW = 0x3f + SO_TXREHASH = 0x4a SO_TXTIME = 0x3d SO_TYPE = 0x1008 SO_WIFI_STATUS = 0x29 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_ppc.go b/vendor/golang.org/x/sys/unix/zerrors_linux_ppc.go index 1151a7dfa..c0fcda86b 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_ppc.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_ppc.go @@ -5,7 +5,7 @@ // +build ppc,linux // Code generated by cmd/cgo -godefs; DO NOT EDIT. -// cgo -godefs -- -Wall -Werror -static -I/tmp/include /build/unix/_const.go +// cgo -godefs -- -Wall -Werror -static -I/tmp/include _const.go package unix @@ -381,6 +381,7 @@ const ( SO_RCVBUF = 0x8 SO_RCVBUFFORCE = 0x21 SO_RCVLOWAT = 0x10 + SO_RCVMARK = 0x4b SO_RCVTIMEO = 0x12 SO_RCVTIMEO_NEW = 0x42 SO_RCVTIMEO_OLD = 0x12 @@ -405,6 +406,7 @@ const ( SO_TIMESTAMPNS_NEW = 0x40 SO_TIMESTAMPNS_OLD = 0x23 SO_TIMESTAMP_NEW = 0x3f + SO_TXREHASH = 0x4a SO_TXTIME = 0x3d SO_TYPE = 0x3 SO_WIFI_STATUS = 0x29 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64.go b/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64.go index ed17f249e..f3b72407a 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64.go @@ -5,7 +5,7 @@ // +build ppc64,linux // Code generated by cmd/cgo -godefs; DO NOT EDIT. 
-// cgo -godefs -- -Wall -Werror -static -I/tmp/include /build/unix/_const.go +// cgo -godefs -- -Wall -Werror -static -I/tmp/include _const.go package unix @@ -385,6 +385,7 @@ const ( SO_RCVBUF = 0x8 SO_RCVBUFFORCE = 0x21 SO_RCVLOWAT = 0x10 + SO_RCVMARK = 0x4b SO_RCVTIMEO = 0x12 SO_RCVTIMEO_NEW = 0x42 SO_RCVTIMEO_OLD = 0x12 @@ -409,6 +410,7 @@ const ( SO_TIMESTAMPNS_NEW = 0x40 SO_TIMESTAMPNS_OLD = 0x23 SO_TIMESTAMP_NEW = 0x3f + SO_TXREHASH = 0x4a SO_TXTIME = 0x3d SO_TYPE = 0x3 SO_WIFI_STATUS = 0x29 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64le.go b/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64le.go index d84a37c1a..72f2a45d5 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64le.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64le.go @@ -5,7 +5,7 @@ // +build ppc64le,linux // Code generated by cmd/cgo -godefs; DO NOT EDIT. -// cgo -godefs -- -Wall -Werror -static -I/tmp/include /build/unix/_const.go +// cgo -godefs -- -Wall -Werror -static -I/tmp/include _const.go package unix @@ -385,6 +385,7 @@ const ( SO_RCVBUF = 0x8 SO_RCVBUFFORCE = 0x21 SO_RCVLOWAT = 0x10 + SO_RCVMARK = 0x4b SO_RCVTIMEO = 0x12 SO_RCVTIMEO_NEW = 0x42 SO_RCVTIMEO_OLD = 0x12 @@ -409,6 +410,7 @@ const ( SO_TIMESTAMPNS_NEW = 0x40 SO_TIMESTAMPNS_OLD = 0x23 SO_TIMESTAMP_NEW = 0x3f + SO_TXREHASH = 0x4a SO_TXTIME = 0x3d SO_TYPE = 0x3 SO_WIFI_STATUS = 0x29 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_riscv64.go b/vendor/golang.org/x/sys/unix/zerrors_linux_riscv64.go index 5cafba83f..45b214b4d 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_riscv64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_riscv64.go @@ -5,7 +5,7 @@ // +build riscv64,linux // Code generated by cmd/cgo -godefs; DO NOT EDIT. -// cgo -godefs -- -Wall -Werror -static -I/tmp/include /build/unix/_const.go +// cgo -godefs -- -Wall -Werror -static -I/tmp/include _const.go package unix @@ -314,6 +314,7 @@ const ( SO_RCVBUF = 0x8 SO_RCVBUFFORCE = 0x21 SO_RCVLOWAT = 0x12 + SO_RCVMARK = 0x4b SO_RCVTIMEO = 0x14 SO_RCVTIMEO_NEW = 0x42 SO_RCVTIMEO_OLD = 0x14 @@ -338,6 +339,7 @@ const ( SO_TIMESTAMPNS_NEW = 0x40 SO_TIMESTAMPNS_OLD = 0x23 SO_TIMESTAMP_NEW = 0x3f + SO_TXREHASH = 0x4a SO_TXTIME = 0x3d SO_TYPE = 0x3 SO_WIFI_STATUS = 0x29 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_s390x.go b/vendor/golang.org/x/sys/unix/zerrors_linux_s390x.go index 6d122da41..1897f207b 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_s390x.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_s390x.go @@ -5,7 +5,7 @@ // +build s390x,linux // Code generated by cmd/cgo -godefs; DO NOT EDIT. -// cgo -godefs -- -Wall -Werror -static -I/tmp/include -fsigned-char /build/unix/_const.go +// cgo -godefs -- -Wall -Werror -static -I/tmp/include -fsigned-char _const.go package unix @@ -389,6 +389,7 @@ const ( SO_RCVBUF = 0x8 SO_RCVBUFFORCE = 0x21 SO_RCVLOWAT = 0x12 + SO_RCVMARK = 0x4b SO_RCVTIMEO = 0x14 SO_RCVTIMEO_NEW = 0x42 SO_RCVTIMEO_OLD = 0x14 @@ -413,6 +414,7 @@ const ( SO_TIMESTAMPNS_NEW = 0x40 SO_TIMESTAMPNS_OLD = 0x23 SO_TIMESTAMP_NEW = 0x3f + SO_TXREHASH = 0x4a SO_TXTIME = 0x3d SO_TYPE = 0x3 SO_WIFI_STATUS = 0x29 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_sparc64.go b/vendor/golang.org/x/sys/unix/zerrors_linux_sparc64.go index 6bd19e51d..1fb7a3953 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_sparc64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_sparc64.go @@ -5,7 +5,7 @@ // +build sparc64,linux // Code generated by cmd/cgo -godefs; DO NOT EDIT. 
-// cgo -godefs -- -Wall -Werror -static -I/tmp/include /build/unix/_const.go +// cgo -godefs -- -Wall -Werror -static -I/tmp/include _const.go package unix @@ -380,6 +380,7 @@ const ( SO_RCVBUF = 0x1002 SO_RCVBUFFORCE = 0x100b SO_RCVLOWAT = 0x800 + SO_RCVMARK = 0x54 SO_RCVTIMEO = 0x2000 SO_RCVTIMEO_NEW = 0x44 SO_RCVTIMEO_OLD = 0x2000 @@ -404,6 +405,7 @@ const ( SO_TIMESTAMPNS_NEW = 0x42 SO_TIMESTAMPNS_OLD = 0x21 SO_TIMESTAMP_NEW = 0x46 + SO_TXREHASH = 0x53 SO_TXTIME = 0x3f SO_TYPE = 0x1008 SO_WIFI_STATUS = 0x25 diff --git a/vendor/golang.org/x/sys/unix/zsyscall_darwin_amd64.go b/vendor/golang.org/x/sys/unix/zsyscall_darwin_amd64.go index 879376589..467deed76 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_darwin_amd64.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_darwin_amd64.go @@ -1643,6 +1643,30 @@ var libc_mknod_trampoline_addr uintptr // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func Mount(fsType string, dir string, flags int, data unsafe.Pointer) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(fsType) + if err != nil { + return + } + var _p1 *byte + _p1, err = BytePtrFromString(dir) + if err != nil { + return + } + _, _, e1 := syscall_syscall6(libc_mount_trampoline_addr, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), uintptr(flags), uintptr(data), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_mount_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_mount mount "/usr/lib/libSystem.B.dylib" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Open(path string, mode int, perm uint32) (fd int, err error) { var _p0 *byte _p0, err = BytePtrFromString(path) diff --git a/vendor/golang.org/x/sys/unix/zsyscall_darwin_amd64.s b/vendor/golang.org/x/sys/unix/zsyscall_darwin_amd64.s index 8da90cf0e..7e308a476 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_darwin_amd64.s +++ b/vendor/golang.org/x/sys/unix/zsyscall_darwin_amd64.s @@ -600,6 +600,12 @@ TEXT libc_mknod_trampoline<>(SB),NOSPLIT,$0-0 GLOBL ·libc_mknod_trampoline_addr(SB), RODATA, $8 DATA ·libc_mknod_trampoline_addr(SB)/8, $libc_mknod_trampoline<>(SB) +TEXT libc_mount_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_mount(SB) + +GLOBL ·libc_mount_trampoline_addr(SB), RODATA, $8 +DATA ·libc_mount_trampoline_addr(SB)/8, $libc_mount_trampoline<>(SB) + TEXT libc_open_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_open(SB) diff --git a/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm64.go b/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm64.go index f47eedd5a..35938d34f 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm64.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm64.go @@ -1643,6 +1643,30 @@ var libc_mknod_trampoline_addr uintptr // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func Mount(fsType string, dir string, flags int, data unsafe.Pointer) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(fsType) + if err != nil { + return + } + var _p1 *byte + _p1, err = BytePtrFromString(dir) + if err != nil { + return + } + _, _, e1 := syscall_syscall6(libc_mount_trampoline_addr, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), uintptr(flags), uintptr(data), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_mount_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_mount mount "/usr/lib/libSystem.B.dylib" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Open(path string, mode int, perm uint32) (fd int, err error) { var _p0 *byte _p0, err = 
BytePtrFromString(path) diff --git a/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm64.s b/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm64.s index 4d26f7d01..b09e5bb0e 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm64.s +++ b/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm64.s @@ -600,6 +600,12 @@ TEXT libc_mknod_trampoline<>(SB),NOSPLIT,$0-0 GLOBL ·libc_mknod_trampoline_addr(SB), RODATA, $8 DATA ·libc_mknod_trampoline_addr(SB)/8, $libc_mknod_trampoline<>(SB) +TEXT libc_mount_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_mount(SB) + +GLOBL ·libc_mount_trampoline_addr(SB), RODATA, $8 +DATA ·libc_mount_trampoline_addr(SB)/8, $libc_mount_trampoline<>(SB) + TEXT libc_open_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_open(SB) diff --git a/vendor/golang.org/x/sys/unix/zsyscall_linux_loong64.go b/vendor/golang.org/x/sys/unix/zsyscall_linux_loong64.go new file mode 100644 index 000000000..523f2ba03 --- /dev/null +++ b/vendor/golang.org/x/sys/unix/zsyscall_linux_loong64.go @@ -0,0 +1,527 @@ +// go run mksyscall.go -tags linux,loong64 syscall_linux.go syscall_linux_loong64.go +// Code generated by the command above; see README.md. DO NOT EDIT. + +//go:build linux && loong64 +// +build linux,loong64 + +package unix + +import ( + "syscall" + "unsafe" +) + +var _ syscall.Errno + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func fanotifyMark(fd int, flags uint, mask uint64, dirFd int, pathname *byte) (err error) { + _, _, e1 := Syscall6(SYS_FANOTIFY_MARK, uintptr(fd), uintptr(flags), uintptr(mask), uintptr(dirFd), uintptr(unsafe.Pointer(pathname)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Fallocate(fd int, mode uint32, off int64, len int64) (err error) { + _, _, e1 := Syscall6(SYS_FALLOCATE, uintptr(fd), uintptr(mode), uintptr(off), uintptr(len), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Tee(rfd int, wfd int, len int, flags int) (n int64, err error) { + r0, _, e1 := Syscall6(SYS_TEE, uintptr(rfd), uintptr(wfd), uintptr(len), uintptr(flags), 0, 0) + n = int64(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func EpollWait(epfd int, events []EpollEvent, msec int) (n int, err error) { + var _p0 unsafe.Pointer + if len(events) > 0 { + _p0 = unsafe.Pointer(&events[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + r0, _, e1 := Syscall6(SYS_EPOLL_PWAIT, uintptr(epfd), uintptr(_p0), uintptr(len(events)), uintptr(msec), 0, 0) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Fadvise(fd int, offset int64, length int64, advice int) (err error) { + _, _, e1 := Syscall6(SYS_FADVISE64, uintptr(fd), uintptr(offset), uintptr(length), uintptr(advice), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Fchown(fd int, uid int, gid int) (err error) { + _, _, e1 := Syscall(SYS_FCHOWN, uintptr(fd), uintptr(uid), uintptr(gid)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Fstatfs(fd int, buf *Statfs_t) (err error) { + _, _, e1 := Syscall(SYS_FSTATFS, uintptr(fd), uintptr(unsafe.Pointer(buf)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE 
COMMAND AT THE TOP; DO NOT EDIT + +func Ftruncate(fd int, length int64) (err error) { + _, _, e1 := Syscall(SYS_FTRUNCATE, uintptr(fd), uintptr(length), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Getegid() (egid int) { + r0, _ := RawSyscallNoError(SYS_GETEGID, 0, 0, 0) + egid = int(r0) + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Geteuid() (euid int) { + r0, _ := RawSyscallNoError(SYS_GETEUID, 0, 0, 0) + euid = int(r0) + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Getgid() (gid int) { + r0, _ := RawSyscallNoError(SYS_GETGID, 0, 0, 0) + gid = int(r0) + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Getuid() (uid int) { + r0, _ := RawSyscallNoError(SYS_GETUID, 0, 0, 0) + uid = int(r0) + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Listen(s int, n int) (err error) { + _, _, e1 := Syscall(SYS_LISTEN, uintptr(s), uintptr(n), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func pread(fd int, p []byte, offset int64) (n int, err error) { + var _p0 unsafe.Pointer + if len(p) > 0 { + _p0 = unsafe.Pointer(&p[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + r0, _, e1 := Syscall6(SYS_PREAD64, uintptr(fd), uintptr(_p0), uintptr(len(p)), uintptr(offset), 0, 0) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func pwrite(fd int, p []byte, offset int64) (n int, err error) { + var _p0 unsafe.Pointer + if len(p) > 0 { + _p0 = unsafe.Pointer(&p[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + r0, _, e1 := Syscall6(SYS_PWRITE64, uintptr(fd), uintptr(_p0), uintptr(len(p)), uintptr(offset), 0, 0) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Seek(fd int, offset int64, whence int) (off int64, err error) { + r0, _, e1 := Syscall(SYS_LSEEK, uintptr(fd), uintptr(offset), uintptr(whence)) + off = int64(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func sendfile(outfd int, infd int, offset *int64, count int) (written int, err error) { + r0, _, e1 := Syscall6(SYS_SENDFILE, uintptr(outfd), uintptr(infd), uintptr(unsafe.Pointer(offset)), uintptr(count), 0, 0) + written = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func setfsgid(gid int) (prev int, err error) { + r0, _, e1 := Syscall(SYS_SETFSGID, uintptr(gid), 0, 0) + prev = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func setfsuid(uid int) (prev int, err error) { + r0, _, e1 := Syscall(SYS_SETFSUID, uintptr(uid), 0, 0) + prev = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Setregid(rgid int, egid int) (err error) { + _, _, e1 := RawSyscall(SYS_SETREGID, uintptr(rgid), uintptr(egid), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Setresgid(rgid int, egid int, sgid int) (err error) { + _, _, e1 := RawSyscall(SYS_SETRESGID, uintptr(rgid), 
uintptr(egid), uintptr(sgid)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Setresuid(ruid int, euid int, suid int) (err error) { + _, _, e1 := RawSyscall(SYS_SETRESUID, uintptr(ruid), uintptr(euid), uintptr(suid)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Setreuid(ruid int, euid int) (err error) { + _, _, e1 := RawSyscall(SYS_SETREUID, uintptr(ruid), uintptr(euid), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Shutdown(fd int, how int) (err error) { + _, _, e1 := Syscall(SYS_SHUTDOWN, uintptr(fd), uintptr(how), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Splice(rfd int, roff *int64, wfd int, woff *int64, len int, flags int) (n int64, err error) { + r0, _, e1 := Syscall6(SYS_SPLICE, uintptr(rfd), uintptr(unsafe.Pointer(roff)), uintptr(wfd), uintptr(unsafe.Pointer(woff)), uintptr(len), uintptr(flags)) + n = int64(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Statfs(path string, buf *Statfs_t) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_STATFS, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(buf)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func SyncFileRange(fd int, off int64, n int64, flags int) (err error) { + _, _, e1 := Syscall6(SYS_SYNC_FILE_RANGE, uintptr(fd), uintptr(off), uintptr(n), uintptr(flags), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Truncate(path string, length int64) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_TRUNCATE, uintptr(unsafe.Pointer(_p0)), uintptr(length), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func accept4(s int, rsa *RawSockaddrAny, addrlen *_Socklen, flags int) (fd int, err error) { + r0, _, e1 := Syscall6(SYS_ACCEPT4, uintptr(s), uintptr(unsafe.Pointer(rsa)), uintptr(unsafe.Pointer(addrlen)), uintptr(flags), 0, 0) + fd = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func bind(s int, addr unsafe.Pointer, addrlen _Socklen) (err error) { + _, _, e1 := Syscall(SYS_BIND, uintptr(s), uintptr(addr), uintptr(addrlen)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func connect(s int, addr unsafe.Pointer, addrlen _Socklen) (err error) { + _, _, e1 := Syscall(SYS_CONNECT, uintptr(s), uintptr(addr), uintptr(addrlen)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func getgroups(n int, list *_Gid_t) (nn int, err error) { + r0, _, e1 := RawSyscall(SYS_GETGROUPS, uintptr(n), uintptr(unsafe.Pointer(list)), 0) + nn = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func setgroups(n int, list *_Gid_t) (err error) { + _, _, e1 
:= RawSyscall(SYS_SETGROUPS, uintptr(n), uintptr(unsafe.Pointer(list)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func getsockopt(s int, level int, name int, val unsafe.Pointer, vallen *_Socklen) (err error) { + _, _, e1 := Syscall6(SYS_GETSOCKOPT, uintptr(s), uintptr(level), uintptr(name), uintptr(val), uintptr(unsafe.Pointer(vallen)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func setsockopt(s int, level int, name int, val unsafe.Pointer, vallen uintptr) (err error) { + _, _, e1 := Syscall6(SYS_SETSOCKOPT, uintptr(s), uintptr(level), uintptr(name), uintptr(val), uintptr(vallen), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func socket(domain int, typ int, proto int) (fd int, err error) { + r0, _, e1 := RawSyscall(SYS_SOCKET, uintptr(domain), uintptr(typ), uintptr(proto)) + fd = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func socketpair(domain int, typ int, proto int, fd *[2]int32) (err error) { + _, _, e1 := RawSyscall6(SYS_SOCKETPAIR, uintptr(domain), uintptr(typ), uintptr(proto), uintptr(unsafe.Pointer(fd)), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func getpeername(fd int, rsa *RawSockaddrAny, addrlen *_Socklen) (err error) { + _, _, e1 := RawSyscall(SYS_GETPEERNAME, uintptr(fd), uintptr(unsafe.Pointer(rsa)), uintptr(unsafe.Pointer(addrlen))) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func getsockname(fd int, rsa *RawSockaddrAny, addrlen *_Socklen) (err error) { + _, _, e1 := RawSyscall(SYS_GETSOCKNAME, uintptr(fd), uintptr(unsafe.Pointer(rsa)), uintptr(unsafe.Pointer(addrlen))) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func recvfrom(fd int, p []byte, flags int, from *RawSockaddrAny, fromlen *_Socklen) (n int, err error) { + var _p0 unsafe.Pointer + if len(p) > 0 { + _p0 = unsafe.Pointer(&p[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + r0, _, e1 := Syscall6(SYS_RECVFROM, uintptr(fd), uintptr(_p0), uintptr(len(p)), uintptr(flags), uintptr(unsafe.Pointer(from)), uintptr(unsafe.Pointer(fromlen))) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func sendto(s int, buf []byte, flags int, to unsafe.Pointer, addrlen _Socklen) (err error) { + var _p0 unsafe.Pointer + if len(buf) > 0 { + _p0 = unsafe.Pointer(&buf[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + _, _, e1 := Syscall6(SYS_SENDTO, uintptr(s), uintptr(_p0), uintptr(len(buf)), uintptr(flags), uintptr(to), uintptr(addrlen)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func recvmsg(s int, msg *Msghdr, flags int) (n int, err error) { + r0, _, e1 := Syscall(SYS_RECVMSG, uintptr(s), uintptr(unsafe.Pointer(msg)), uintptr(flags)) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func sendmsg(s int, msg *Msghdr, flags int) (n int, err error) { + r0, _, e1 := Syscall(SYS_SENDMSG, uintptr(s), 
uintptr(unsafe.Pointer(msg)), uintptr(flags)) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func mmap(addr uintptr, length uintptr, prot int, flags int, fd int, offset int64) (xaddr uintptr, err error) { + r0, _, e1 := Syscall6(SYS_MMAP, uintptr(addr), uintptr(length), uintptr(prot), uintptr(flags), uintptr(fd), uintptr(offset)) + xaddr = uintptr(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Gettimeofday(tv *Timeval) (err error) { + _, _, e1 := RawSyscall(SYS_GETTIMEOFDAY, uintptr(unsafe.Pointer(tv)), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func kexecFileLoad(kernelFd int, initrdFd int, cmdlineLen int, cmdline string, flags int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(cmdline) + if err != nil { + return + } + _, _, e1 := Syscall6(SYS_KEXEC_FILE_LOAD, uintptr(kernelFd), uintptr(initrdFd), uintptr(cmdlineLen), uintptr(unsafe.Pointer(_p0)), uintptr(flags), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} diff --git a/vendor/golang.org/x/sys/unix/zsyscall_linux_riscv64.go b/vendor/golang.org/x/sys/unix/zsyscall_linux_riscv64.go index a1a9bcbbd..1239cc2de 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_linux_riscv64.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_linux_riscv64.go @@ -180,6 +180,17 @@ func Listen(s int, n int) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func MemfdSecret(flags int) (fd int, err error) { + r0, _, e1 := Syscall(SYS_MEMFD_SECRET, uintptr(flags), 0, 0) + fd = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func pread(fd int, p []byte, offset int64) (n int, err error) { var _p0 unsafe.Pointer if len(p) > 0 { diff --git a/vendor/golang.org/x/sys/unix/zsyscall_solaris_amd64.go b/vendor/golang.org/x/sys/unix/zsyscall_solaris_amd64.go index d12f4fbfe..fdf53f8da 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_solaris_amd64.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_solaris_amd64.go @@ -66,6 +66,7 @@ import ( //go:cgo_import_dynamic libc_getpriority getpriority "libc.so" //go:cgo_import_dynamic libc_getrlimit getrlimit "libc.so" //go:cgo_import_dynamic libc_getrusage getrusage "libc.so" +//go:cgo_import_dynamic libc_getsid getsid "libc.so" //go:cgo_import_dynamic libc_gettimeofday gettimeofday "libc.so" //go:cgo_import_dynamic libc_getuid getuid "libc.so" //go:cgo_import_dynamic libc_kill kill "libc.so" @@ -202,6 +203,7 @@ import ( //go:linkname procGetpriority libc_getpriority //go:linkname procGetrlimit libc_getrlimit //go:linkname procGetrusage libc_getrusage +//go:linkname procGetsid libc_getsid //go:linkname procGettimeofday libc_gettimeofday //go:linkname procGetuid libc_getuid //go:linkname procKill libc_kill @@ -339,6 +341,7 @@ var ( procGetpriority, procGetrlimit, procGetrusage, + procGetsid, procGettimeofday, procGetuid, procKill, @@ -1044,6 +1047,17 @@ func Getrusage(who int, rusage *Rusage) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func Getsid(pid int) (sid int, err error) { + r0, _, e1 := rawSysvicall6(uintptr(unsafe.Pointer(&procGetsid)), 1, uintptr(pid), 0, 0, 0, 0, 0) + sid = int(r0) + if e1 != 0 { + err = e1 + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func 
Gettimeofday(tv *Timeval) (err error) { _, _, e1 := rawSysvicall6(uintptr(unsafe.Pointer(&procGettimeofday)), 1, uintptr(unsafe.Pointer(tv)), 0, 0, 0, 0, 0) if e1 != 0 { diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_loong64.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_loong64.go new file mode 100644 index 000000000..44a764c99 --- /dev/null +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_loong64.go @@ -0,0 +1,311 @@ +// go run linux/mksysnum.go -Wall -Werror -static -I/tmp/include /tmp/include/asm/unistd.h +// Code generated by the command above; see README.md. DO NOT EDIT. + +//go:build loong64 && linux +// +build loong64,linux + +package unix + +const ( + SYS_IO_SETUP = 0 + SYS_IO_DESTROY = 1 + SYS_IO_SUBMIT = 2 + SYS_IO_CANCEL = 3 + SYS_IO_GETEVENTS = 4 + SYS_SETXATTR = 5 + SYS_LSETXATTR = 6 + SYS_FSETXATTR = 7 + SYS_GETXATTR = 8 + SYS_LGETXATTR = 9 + SYS_FGETXATTR = 10 + SYS_LISTXATTR = 11 + SYS_LLISTXATTR = 12 + SYS_FLISTXATTR = 13 + SYS_REMOVEXATTR = 14 + SYS_LREMOVEXATTR = 15 + SYS_FREMOVEXATTR = 16 + SYS_GETCWD = 17 + SYS_LOOKUP_DCOOKIE = 18 + SYS_EVENTFD2 = 19 + SYS_EPOLL_CREATE1 = 20 + SYS_EPOLL_CTL = 21 + SYS_EPOLL_PWAIT = 22 + SYS_DUP = 23 + SYS_DUP3 = 24 + SYS_FCNTL = 25 + SYS_INOTIFY_INIT1 = 26 + SYS_INOTIFY_ADD_WATCH = 27 + SYS_INOTIFY_RM_WATCH = 28 + SYS_IOCTL = 29 + SYS_IOPRIO_SET = 30 + SYS_IOPRIO_GET = 31 + SYS_FLOCK = 32 + SYS_MKNODAT = 33 + SYS_MKDIRAT = 34 + SYS_UNLINKAT = 35 + SYS_SYMLINKAT = 36 + SYS_LINKAT = 37 + SYS_UMOUNT2 = 39 + SYS_MOUNT = 40 + SYS_PIVOT_ROOT = 41 + SYS_NFSSERVCTL = 42 + SYS_STATFS = 43 + SYS_FSTATFS = 44 + SYS_TRUNCATE = 45 + SYS_FTRUNCATE = 46 + SYS_FALLOCATE = 47 + SYS_FACCESSAT = 48 + SYS_CHDIR = 49 + SYS_FCHDIR = 50 + SYS_CHROOT = 51 + SYS_FCHMOD = 52 + SYS_FCHMODAT = 53 + SYS_FCHOWNAT = 54 + SYS_FCHOWN = 55 + SYS_OPENAT = 56 + SYS_CLOSE = 57 + SYS_VHANGUP = 58 + SYS_PIPE2 = 59 + SYS_QUOTACTL = 60 + SYS_GETDENTS64 = 61 + SYS_LSEEK = 62 + SYS_READ = 63 + SYS_WRITE = 64 + SYS_READV = 65 + SYS_WRITEV = 66 + SYS_PREAD64 = 67 + SYS_PWRITE64 = 68 + SYS_PREADV = 69 + SYS_PWRITEV = 70 + SYS_SENDFILE = 71 + SYS_PSELECT6 = 72 + SYS_PPOLL = 73 + SYS_SIGNALFD4 = 74 + SYS_VMSPLICE = 75 + SYS_SPLICE = 76 + SYS_TEE = 77 + SYS_READLINKAT = 78 + SYS_SYNC = 81 + SYS_FSYNC = 82 + SYS_FDATASYNC = 83 + SYS_SYNC_FILE_RANGE = 84 + SYS_TIMERFD_CREATE = 85 + SYS_TIMERFD_SETTIME = 86 + SYS_TIMERFD_GETTIME = 87 + SYS_UTIMENSAT = 88 + SYS_ACCT = 89 + SYS_CAPGET = 90 + SYS_CAPSET = 91 + SYS_PERSONALITY = 92 + SYS_EXIT = 93 + SYS_EXIT_GROUP = 94 + SYS_WAITID = 95 + SYS_SET_TID_ADDRESS = 96 + SYS_UNSHARE = 97 + SYS_FUTEX = 98 + SYS_SET_ROBUST_LIST = 99 + SYS_GET_ROBUST_LIST = 100 + SYS_NANOSLEEP = 101 + SYS_GETITIMER = 102 + SYS_SETITIMER = 103 + SYS_KEXEC_LOAD = 104 + SYS_INIT_MODULE = 105 + SYS_DELETE_MODULE = 106 + SYS_TIMER_CREATE = 107 + SYS_TIMER_GETTIME = 108 + SYS_TIMER_GETOVERRUN = 109 + SYS_TIMER_SETTIME = 110 + SYS_TIMER_DELETE = 111 + SYS_CLOCK_SETTIME = 112 + SYS_CLOCK_GETTIME = 113 + SYS_CLOCK_GETRES = 114 + SYS_CLOCK_NANOSLEEP = 115 + SYS_SYSLOG = 116 + SYS_PTRACE = 117 + SYS_SCHED_SETPARAM = 118 + SYS_SCHED_SETSCHEDULER = 119 + SYS_SCHED_GETSCHEDULER = 120 + SYS_SCHED_GETPARAM = 121 + SYS_SCHED_SETAFFINITY = 122 + SYS_SCHED_GETAFFINITY = 123 + SYS_SCHED_YIELD = 124 + SYS_SCHED_GET_PRIORITY_MAX = 125 + SYS_SCHED_GET_PRIORITY_MIN = 126 + SYS_SCHED_RR_GET_INTERVAL = 127 + SYS_RESTART_SYSCALL = 128 + SYS_KILL = 129 + SYS_TKILL = 130 + SYS_TGKILL = 131 + SYS_SIGALTSTACK = 132 + SYS_RT_SIGSUSPEND = 133 + SYS_RT_SIGACTION = 134 + SYS_RT_SIGPROCMASK 
= 135 + SYS_RT_SIGPENDING = 136 + SYS_RT_SIGTIMEDWAIT = 137 + SYS_RT_SIGQUEUEINFO = 138 + SYS_RT_SIGRETURN = 139 + SYS_SETPRIORITY = 140 + SYS_GETPRIORITY = 141 + SYS_REBOOT = 142 + SYS_SETREGID = 143 + SYS_SETGID = 144 + SYS_SETREUID = 145 + SYS_SETUID = 146 + SYS_SETRESUID = 147 + SYS_GETRESUID = 148 + SYS_SETRESGID = 149 + SYS_GETRESGID = 150 + SYS_SETFSUID = 151 + SYS_SETFSGID = 152 + SYS_TIMES = 153 + SYS_SETPGID = 154 + SYS_GETPGID = 155 + SYS_GETSID = 156 + SYS_SETSID = 157 + SYS_GETGROUPS = 158 + SYS_SETGROUPS = 159 + SYS_UNAME = 160 + SYS_SETHOSTNAME = 161 + SYS_SETDOMAINNAME = 162 + SYS_GETRUSAGE = 165 + SYS_UMASK = 166 + SYS_PRCTL = 167 + SYS_GETCPU = 168 + SYS_GETTIMEOFDAY = 169 + SYS_SETTIMEOFDAY = 170 + SYS_ADJTIMEX = 171 + SYS_GETPID = 172 + SYS_GETPPID = 173 + SYS_GETUID = 174 + SYS_GETEUID = 175 + SYS_GETGID = 176 + SYS_GETEGID = 177 + SYS_GETTID = 178 + SYS_SYSINFO = 179 + SYS_MQ_OPEN = 180 + SYS_MQ_UNLINK = 181 + SYS_MQ_TIMEDSEND = 182 + SYS_MQ_TIMEDRECEIVE = 183 + SYS_MQ_NOTIFY = 184 + SYS_MQ_GETSETATTR = 185 + SYS_MSGGET = 186 + SYS_MSGCTL = 187 + SYS_MSGRCV = 188 + SYS_MSGSND = 189 + SYS_SEMGET = 190 + SYS_SEMCTL = 191 + SYS_SEMTIMEDOP = 192 + SYS_SEMOP = 193 + SYS_SHMGET = 194 + SYS_SHMCTL = 195 + SYS_SHMAT = 196 + SYS_SHMDT = 197 + SYS_SOCKET = 198 + SYS_SOCKETPAIR = 199 + SYS_BIND = 200 + SYS_LISTEN = 201 + SYS_ACCEPT = 202 + SYS_CONNECT = 203 + SYS_GETSOCKNAME = 204 + SYS_GETPEERNAME = 205 + SYS_SENDTO = 206 + SYS_RECVFROM = 207 + SYS_SETSOCKOPT = 208 + SYS_GETSOCKOPT = 209 + SYS_SHUTDOWN = 210 + SYS_SENDMSG = 211 + SYS_RECVMSG = 212 + SYS_READAHEAD = 213 + SYS_BRK = 214 + SYS_MUNMAP = 215 + SYS_MREMAP = 216 + SYS_ADD_KEY = 217 + SYS_REQUEST_KEY = 218 + SYS_KEYCTL = 219 + SYS_CLONE = 220 + SYS_EXECVE = 221 + SYS_MMAP = 222 + SYS_FADVISE64 = 223 + SYS_SWAPON = 224 + SYS_SWAPOFF = 225 + SYS_MPROTECT = 226 + SYS_MSYNC = 227 + SYS_MLOCK = 228 + SYS_MUNLOCK = 229 + SYS_MLOCKALL = 230 + SYS_MUNLOCKALL = 231 + SYS_MINCORE = 232 + SYS_MADVISE = 233 + SYS_REMAP_FILE_PAGES = 234 + SYS_MBIND = 235 + SYS_GET_MEMPOLICY = 236 + SYS_SET_MEMPOLICY = 237 + SYS_MIGRATE_PAGES = 238 + SYS_MOVE_PAGES = 239 + SYS_RT_TGSIGQUEUEINFO = 240 + SYS_PERF_EVENT_OPEN = 241 + SYS_ACCEPT4 = 242 + SYS_RECVMMSG = 243 + SYS_ARCH_SPECIFIC_SYSCALL = 244 + SYS_WAIT4 = 260 + SYS_PRLIMIT64 = 261 + SYS_FANOTIFY_INIT = 262 + SYS_FANOTIFY_MARK = 263 + SYS_NAME_TO_HANDLE_AT = 264 + SYS_OPEN_BY_HANDLE_AT = 265 + SYS_CLOCK_ADJTIME = 266 + SYS_SYNCFS = 267 + SYS_SETNS = 268 + SYS_SENDMMSG = 269 + SYS_PROCESS_VM_READV = 270 + SYS_PROCESS_VM_WRITEV = 271 + SYS_KCMP = 272 + SYS_FINIT_MODULE = 273 + SYS_SCHED_SETATTR = 274 + SYS_SCHED_GETATTR = 275 + SYS_RENAMEAT2 = 276 + SYS_SECCOMP = 277 + SYS_GETRANDOM = 278 + SYS_MEMFD_CREATE = 279 + SYS_BPF = 280 + SYS_EXECVEAT = 281 + SYS_USERFAULTFD = 282 + SYS_MEMBARRIER = 283 + SYS_MLOCK2 = 284 + SYS_COPY_FILE_RANGE = 285 + SYS_PREADV2 = 286 + SYS_PWRITEV2 = 287 + SYS_PKEY_MPROTECT = 288 + SYS_PKEY_ALLOC = 289 + SYS_PKEY_FREE = 290 + SYS_STATX = 291 + SYS_IO_PGETEVENTS = 292 + SYS_RSEQ = 293 + SYS_KEXEC_FILE_LOAD = 294 + SYS_PIDFD_SEND_SIGNAL = 424 + SYS_IO_URING_SETUP = 425 + SYS_IO_URING_ENTER = 426 + SYS_IO_URING_REGISTER = 427 + SYS_OPEN_TREE = 428 + SYS_MOVE_MOUNT = 429 + SYS_FSOPEN = 430 + SYS_FSCONFIG = 431 + SYS_FSMOUNT = 432 + SYS_FSPICK = 433 + SYS_PIDFD_OPEN = 434 + SYS_CLONE3 = 435 + SYS_CLOSE_RANGE = 436 + SYS_OPENAT2 = 437 + SYS_PIDFD_GETFD = 438 + SYS_FACCESSAT2 = 439 + SYS_PROCESS_MADVISE = 440 + SYS_EPOLL_PWAIT2 = 441 + SYS_MOUNT_SETATTR = 442 + 
SYS_QUOTACTL_FD = 443 + SYS_LANDLOCK_CREATE_RULESET = 444 + SYS_LANDLOCK_ADD_RULE = 445 + SYS_LANDLOCK_RESTRICT_SELF = 446 + SYS_PROCESS_MRELEASE = 448 + SYS_FUTEX_WAITV = 449 + SYS_SET_MEMPOLICY_HOME_NODE = 450 +) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_riscv64.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_riscv64.go index c3a5af862..3a9c96b28 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_riscv64.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_riscv64.go @@ -309,6 +309,7 @@ const ( SYS_LANDLOCK_CREATE_RULESET = 444 SYS_LANDLOCK_ADD_RULE = 445 SYS_LANDLOCK_RESTRICT_SELF = 446 + SYS_MEMFD_SECRET = 447 SYS_PROCESS_MRELEASE = 448 SYS_FUTEX_WAITV = 449 SYS_SET_MEMPOLICY_HOME_NODE = 450 diff --git a/vendor/golang.org/x/sys/unix/ztypes_darwin_amd64.go b/vendor/golang.org/x/sys/unix/ztypes_darwin_amd64.go index 885842c0e..e2a64f099 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_darwin_amd64.go +++ b/vendor/golang.org/x/sys/unix/ztypes_darwin_amd64.go @@ -366,30 +366,57 @@ type ICMPv6Filter struct { Filt [8]uint32 } +type TCPConnectionInfo struct { + State uint8 + Snd_wscale uint8 + Rcv_wscale uint8 + _ uint8 + Options uint32 + Flags uint32 + Rto uint32 + Maxseg uint32 + Snd_ssthresh uint32 + Snd_cwnd uint32 + Snd_wnd uint32 + Snd_sbbytes uint32 + Rcv_wnd uint32 + Rttcur uint32 + Srtt uint32 + Rttvar uint32 + Txpackets uint64 + Txbytes uint64 + Txretransmitbytes uint64 + Rxpackets uint64 + Rxbytes uint64 + Rxoutoforderbytes uint64 + Txretransmitpackets uint64 +} + const ( - SizeofSockaddrInet4 = 0x10 - SizeofSockaddrInet6 = 0x1c - SizeofSockaddrAny = 0x6c - SizeofSockaddrUnix = 0x6a - SizeofSockaddrDatalink = 0x14 - SizeofSockaddrCtl = 0x20 - SizeofSockaddrVM = 0xc - SizeofXvsockpcb = 0xa8 - SizeofXSocket = 0x64 - SizeofXSockbuf = 0x18 - SizeofXVSockPgen = 0x20 - SizeofXucred = 0x4c - SizeofLinger = 0x8 - SizeofIovec = 0x10 - SizeofIPMreq = 0x8 - SizeofIPMreqn = 0xc - SizeofIPv6Mreq = 0x14 - SizeofMsghdr = 0x30 - SizeofCmsghdr = 0xc - SizeofInet4Pktinfo = 0xc - SizeofInet6Pktinfo = 0x14 - SizeofIPv6MTUInfo = 0x20 - SizeofICMPv6Filter = 0x20 + SizeofSockaddrInet4 = 0x10 + SizeofSockaddrInet6 = 0x1c + SizeofSockaddrAny = 0x6c + SizeofSockaddrUnix = 0x6a + SizeofSockaddrDatalink = 0x14 + SizeofSockaddrCtl = 0x20 + SizeofSockaddrVM = 0xc + SizeofXvsockpcb = 0xa8 + SizeofXSocket = 0x64 + SizeofXSockbuf = 0x18 + SizeofXVSockPgen = 0x20 + SizeofXucred = 0x4c + SizeofLinger = 0x8 + SizeofIovec = 0x10 + SizeofIPMreq = 0x8 + SizeofIPMreqn = 0xc + SizeofIPv6Mreq = 0x14 + SizeofMsghdr = 0x30 + SizeofCmsghdr = 0xc + SizeofInet4Pktinfo = 0xc + SizeofInet6Pktinfo = 0x14 + SizeofIPv6MTUInfo = 0x20 + SizeofICMPv6Filter = 0x20 + SizeofTCPConnectionInfo = 0x70 ) const ( diff --git a/vendor/golang.org/x/sys/unix/ztypes_darwin_arm64.go b/vendor/golang.org/x/sys/unix/ztypes_darwin_arm64.go index b23c02337..34aa77521 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_darwin_arm64.go +++ b/vendor/golang.org/x/sys/unix/ztypes_darwin_arm64.go @@ -366,30 +366,57 @@ type ICMPv6Filter struct { Filt [8]uint32 } +type TCPConnectionInfo struct { + State uint8 + Snd_wscale uint8 + Rcv_wscale uint8 + _ uint8 + Options uint32 + Flags uint32 + Rto uint32 + Maxseg uint32 + Snd_ssthresh uint32 + Snd_cwnd uint32 + Snd_wnd uint32 + Snd_sbbytes uint32 + Rcv_wnd uint32 + Rttcur uint32 + Srtt uint32 + Rttvar uint32 + Txpackets uint64 + Txbytes uint64 + Txretransmitbytes uint64 + Rxpackets uint64 + Rxbytes uint64 + Rxoutoforderbytes uint64 + Txretransmitpackets uint64 +} + const ( - SizeofSockaddrInet4 = 0x10 
- SizeofSockaddrInet6 = 0x1c - SizeofSockaddrAny = 0x6c - SizeofSockaddrUnix = 0x6a - SizeofSockaddrDatalink = 0x14 - SizeofSockaddrCtl = 0x20 - SizeofSockaddrVM = 0xc - SizeofXvsockpcb = 0xa8 - SizeofXSocket = 0x64 - SizeofXSockbuf = 0x18 - SizeofXVSockPgen = 0x20 - SizeofXucred = 0x4c - SizeofLinger = 0x8 - SizeofIovec = 0x10 - SizeofIPMreq = 0x8 - SizeofIPMreqn = 0xc - SizeofIPv6Mreq = 0x14 - SizeofMsghdr = 0x30 - SizeofCmsghdr = 0xc - SizeofInet4Pktinfo = 0xc - SizeofInet6Pktinfo = 0x14 - SizeofIPv6MTUInfo = 0x20 - SizeofICMPv6Filter = 0x20 + SizeofSockaddrInet4 = 0x10 + SizeofSockaddrInet6 = 0x1c + SizeofSockaddrAny = 0x6c + SizeofSockaddrUnix = 0x6a + SizeofSockaddrDatalink = 0x14 + SizeofSockaddrCtl = 0x20 + SizeofSockaddrVM = 0xc + SizeofXvsockpcb = 0xa8 + SizeofXSocket = 0x64 + SizeofXSockbuf = 0x18 + SizeofXVSockPgen = 0x20 + SizeofXucred = 0x4c + SizeofLinger = 0x8 + SizeofIovec = 0x10 + SizeofIPMreq = 0x8 + SizeofIPMreqn = 0xc + SizeofIPv6Mreq = 0x14 + SizeofMsghdr = 0x30 + SizeofCmsghdr = 0xc + SizeofInet4Pktinfo = 0xc + SizeofInet6Pktinfo = 0x14 + SizeofIPv6MTUInfo = 0x20 + SizeofICMPv6Filter = 0x20 + SizeofTCPConnectionInfo = 0x70 ) const ( diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux.go b/vendor/golang.org/x/sys/unix/ztypes_linux.go index 9962d26bb..e62611e53 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux.go @@ -1127,7 +1127,9 @@ const ( PERF_BR_SYSRET = 0x8 PERF_BR_COND_CALL = 0x9 PERF_BR_COND_RET = 0xa - PERF_BR_MAX = 0xb + PERF_BR_ERET = 0xb + PERF_BR_IRQ = 0xc + PERF_BR_MAX = 0xd PERF_SAMPLE_REGS_ABI_NONE = 0x0 PERF_SAMPLE_REGS_ABI_32 = 0x1 PERF_SAMPLE_REGS_ABI_64 = 0x2 @@ -2969,7 +2971,7 @@ const ( DEVLINK_CMD_TRAP_POLICER_NEW = 0x47 DEVLINK_CMD_TRAP_POLICER_DEL = 0x48 DEVLINK_CMD_HEALTH_REPORTER_TEST = 0x49 - DEVLINK_CMD_MAX = 0x4d + DEVLINK_CMD_MAX = 0x51 DEVLINK_PORT_TYPE_NOTSET = 0x0 DEVLINK_PORT_TYPE_AUTO = 0x1 DEVLINK_PORT_TYPE_ETH = 0x2 @@ -3198,7 +3200,7 @@ const ( DEVLINK_ATTR_RATE_NODE_NAME = 0xa8 DEVLINK_ATTR_RATE_PARENT_NODE_NAME = 0xa9 DEVLINK_ATTR_REGION_MAX_SNAPSHOTS = 0xaa - DEVLINK_ATTR_MAX = 0xaa + DEVLINK_ATTR_MAX = 0xae DEVLINK_DPIPE_FIELD_MAPPING_TYPE_NONE = 0x0 DEVLINK_DPIPE_FIELD_MAPPING_TYPE_IFINDEX = 0x1 DEVLINK_DPIPE_MATCH_TYPE_FIELD_EXACT = 0x0 @@ -3638,7 +3640,11 @@ const ( ETHTOOL_A_RINGS_RX_MINI = 0x7 ETHTOOL_A_RINGS_RX_JUMBO = 0x8 ETHTOOL_A_RINGS_TX = 0x9 - ETHTOOL_A_RINGS_MAX = 0xa + ETHTOOL_A_RINGS_RX_BUF_LEN = 0xa + ETHTOOL_A_RINGS_TCP_DATA_SPLIT = 0xb + ETHTOOL_A_RINGS_CQE_SIZE = 0xc + ETHTOOL_A_RINGS_TX_PUSH = 0xd + ETHTOOL_A_RINGS_MAX = 0xd ETHTOOL_A_CHANNELS_UNSPEC = 0x0 ETHTOOL_A_CHANNELS_HEADER = 0x1 ETHTOOL_A_CHANNELS_RX_MAX = 0x2 @@ -4323,7 +4329,7 @@ const ( NL80211_ATTR_MAC_HINT = 0xc8 NL80211_ATTR_MAC_MASK = 0xd7 NL80211_ATTR_MAX_AP_ASSOC_STA = 0xca - NL80211_ATTR_MAX = 0x135 + NL80211_ATTR_MAX = 0x137 NL80211_ATTR_MAX_CRIT_PROT_DURATION = 0xb4 NL80211_ATTR_MAX_CSA_COUNTERS = 0xce NL80211_ATTR_MAX_MATCH_SETS = 0x85 @@ -4549,7 +4555,7 @@ const ( NL80211_BAND_IFTYPE_ATTR_HE_CAP_PHY = 0x3 NL80211_BAND_IFTYPE_ATTR_HE_CAP_PPE = 0x5 NL80211_BAND_IFTYPE_ATTR_IFTYPES = 0x1 - NL80211_BAND_IFTYPE_ATTR_MAX = 0x7 + NL80211_BAND_IFTYPE_ATTR_MAX = 0xb NL80211_BAND_S1GHZ = 0x4 NL80211_BITRATE_ATTR_2GHZ_SHORTPREAMBLE = 0x2 NL80211_BITRATE_ATTR_MAX = 0x2 @@ -4887,7 +4893,7 @@ const ( NL80211_FREQUENCY_ATTR_GO_CONCURRENT = 0xf NL80211_FREQUENCY_ATTR_INDOOR_ONLY = 0xe NL80211_FREQUENCY_ATTR_IR_CONCURRENT = 0xf - NL80211_FREQUENCY_ATTR_MAX = 0x19 + NL80211_FREQUENCY_ATTR_MAX = 
0x1b NL80211_FREQUENCY_ATTR_MAX_TX_POWER = 0x6 NL80211_FREQUENCY_ATTR_NO_10MHZ = 0x11 NL80211_FREQUENCY_ATTR_NO_160MHZ = 0xc @@ -5254,7 +5260,7 @@ const ( NL80211_RATE_INFO_HE_RU_ALLOC_52 = 0x1 NL80211_RATE_INFO_HE_RU_ALLOC_996 = 0x5 NL80211_RATE_INFO_HE_RU_ALLOC = 0x11 - NL80211_RATE_INFO_MAX = 0x11 + NL80211_RATE_INFO_MAX = 0x16 NL80211_RATE_INFO_MCS = 0x2 NL80211_RATE_INFO_SHORT_GI = 0x4 NL80211_RATE_INFO_VHT_MCS = 0x6 diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_386.go b/vendor/golang.org/x/sys/unix/ztypes_linux_386.go index 531409256..7551af483 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux_386.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux_386.go @@ -1,4 +1,4 @@ -// cgo -godefs -- -Wall -Werror -static -I/tmp/include -m32 /build/unix/linux/types.go | go run mkpost.go +// cgo -godefs -- -Wall -Werror -static -I/tmp/include -m32 linux/types.go | go run mkpost.go // Code generated by the command above; see README.md. DO NOT EDIT. //go:build 386 && linux @@ -324,6 +324,13 @@ type Taskstats struct { Ac_btime64 uint64 Compact_count uint64 Compact_delay_total uint64 + Ac_tgid uint32 + _ [4]byte + Ac_tgetime uint64 + Ac_exe_dev uint64 + Ac_exe_inode uint64 + Wpcopy_count uint64 + Wpcopy_delay_total uint64 } type cpuMask uint32 diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_amd64.go b/vendor/golang.org/x/sys/unix/ztypes_linux_amd64.go index b02ab83db..3e738ac0b 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux_amd64.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux_amd64.go @@ -1,4 +1,4 @@ -// cgo -godefs -- -Wall -Werror -static -I/tmp/include -m64 /build/unix/linux/types.go | go run mkpost.go +// cgo -godefs -- -Wall -Werror -static -I/tmp/include -m64 linux/types.go | go run mkpost.go // Code generated by the command above; see README.md. DO NOT EDIT. //go:build amd64 && linux @@ -338,6 +338,12 @@ type Taskstats struct { Ac_btime64 uint64 Compact_count uint64 Compact_delay_total uint64 + Ac_tgid uint32 + Ac_tgetime uint64 + Ac_exe_dev uint64 + Ac_exe_inode uint64 + Wpcopy_count uint64 + Wpcopy_delay_total uint64 } type cpuMask uint64 diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_arm.go b/vendor/golang.org/x/sys/unix/ztypes_linux_arm.go index 9e6871d2e..6183eef4a 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux_arm.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux_arm.go @@ -1,4 +1,4 @@ -// cgo -godefs -- -Wall -Werror -static -I/tmp/include /build/unix/linux/types.go | go run mkpost.go +// cgo -godefs -- -Wall -Werror -static -I/tmp/include linux/types.go | go run mkpost.go // Code generated by the command above; see README.md. DO NOT EDIT. //go:build arm && linux @@ -315,6 +315,13 @@ type Taskstats struct { Ac_btime64 uint64 Compact_count uint64 Compact_delay_total uint64 + Ac_tgid uint32 + _ [4]byte + Ac_tgetime uint64 + Ac_exe_dev uint64 + Ac_exe_inode uint64 + Wpcopy_count uint64 + Wpcopy_delay_total uint64 } type cpuMask uint32 diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_arm64.go b/vendor/golang.org/x/sys/unix/ztypes_linux_arm64.go index b732d1255..968cecb17 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux_arm64.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux_arm64.go @@ -1,4 +1,4 @@ -// cgo -godefs -- -Wall -Werror -static -I/tmp/include -fsigned-char /build/unix/linux/types.go | go run mkpost.go +// cgo -godefs -- -Wall -Werror -static -I/tmp/include -fsigned-char linux/types.go | go run mkpost.go // Code generated by the command above; see README.md. DO NOT EDIT. 
//go:build arm64 && linux @@ -317,6 +317,12 @@ type Taskstats struct { Ac_btime64 uint64 Compact_count uint64 Compact_delay_total uint64 + Ac_tgid uint32 + Ac_tgetime uint64 + Ac_exe_dev uint64 + Ac_exe_inode uint64 + Wpcopy_count uint64 + Wpcopy_delay_total uint64 } type cpuMask uint64 diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_loong64.go b/vendor/golang.org/x/sys/unix/ztypes_linux_loong64.go new file mode 100644 index 000000000..8fe4c522a --- /dev/null +++ b/vendor/golang.org/x/sys/unix/ztypes_linux_loong64.go @@ -0,0 +1,685 @@ +// cgo -godefs -- -Wall -Werror -static -I/tmp/include linux/types.go | go run mkpost.go +// Code generated by the command above; see README.md. DO NOT EDIT. + +//go:build loong64 && linux +// +build loong64,linux + +package unix + +const ( + SizeofPtr = 0x8 + SizeofLong = 0x8 +) + +type ( + _C_long int64 +) + +type Timespec struct { + Sec int64 + Nsec int64 +} + +type Timeval struct { + Sec int64 + Usec int64 +} + +type Timex struct { + Modes uint32 + Offset int64 + Freq int64 + Maxerror int64 + Esterror int64 + Status int32 + Constant int64 + Precision int64 + Tolerance int64 + Time Timeval + Tick int64 + Ppsfreq int64 + Jitter int64 + Shift int32 + Stabil int64 + Jitcnt int64 + Calcnt int64 + Errcnt int64 + Stbcnt int64 + Tai int32 + _ [44]byte +} + +type Time_t int64 + +type Tms struct { + Utime int64 + Stime int64 + Cutime int64 + Cstime int64 +} + +type Utimbuf struct { + Actime int64 + Modtime int64 +} + +type Rusage struct { + Utime Timeval + Stime Timeval + Maxrss int64 + Ixrss int64 + Idrss int64 + Isrss int64 + Minflt int64 + Majflt int64 + Nswap int64 + Inblock int64 + Oublock int64 + Msgsnd int64 + Msgrcv int64 + Nsignals int64 + Nvcsw int64 + Nivcsw int64 +} + +type Stat_t struct { + Dev uint64 + Ino uint64 + Mode uint32 + Nlink uint32 + Uid uint32 + Gid uint32 + Rdev uint64 + _ uint64 + Size int64 + Blksize int32 + _ int32 + Blocks int64 + Atim Timespec + Mtim Timespec + Ctim Timespec + _ [2]int32 +} + +type Dirent struct { + Ino uint64 + Off int64 + Reclen uint16 + Type uint8 + Name [256]int8 + _ [5]byte +} + +type Flock_t struct { + Type int16 + Whence int16 + Start int64 + Len int64 + Pid int32 + _ [4]byte +} + +type DmNameList struct { + Dev uint64 + Next uint32 + Name [0]byte + _ [4]byte +} + +const ( + FADV_DONTNEED = 0x4 + FADV_NOREUSE = 0x5 +) + +type RawSockaddrNFCLLCP struct { + Sa_family uint16 + Dev_idx uint32 + Target_idx uint32 + Nfc_protocol uint32 + Dsap uint8 + Ssap uint8 + Service_name [63]uint8 + Service_name_len uint64 +} + +type RawSockaddr struct { + Family uint16 + Data [14]int8 +} + +type RawSockaddrAny struct { + Addr RawSockaddr + Pad [96]int8 +} + +type Iovec struct { + Base *byte + Len uint64 +} + +type Msghdr struct { + Name *byte + Namelen uint32 + Iov *Iovec + Iovlen uint64 + Control *byte + Controllen uint64 + Flags int32 + _ [4]byte +} + +type Cmsghdr struct { + Len uint64 + Level int32 + Type int32 +} + +type ifreq struct { + Ifrn [16]byte + Ifru [24]byte +} + +const ( + SizeofSockaddrNFCLLCP = 0x60 + SizeofIovec = 0x10 + SizeofMsghdr = 0x38 + SizeofCmsghdr = 0x10 +) + +const ( + SizeofSockFprog = 0x10 +) + +type PtraceRegs struct { + Regs [32]uint64 + Orig_a0 uint64 + Era uint64 + Badv uint64 + Reserved [10]uint64 +} + +type FdSet struct { + Bits [16]int64 +} + +type Sysinfo_t struct { + Uptime int64 + Loads [3]uint64 + Totalram uint64 + Freeram uint64 + Sharedram uint64 + Bufferram uint64 + Totalswap uint64 + Freeswap uint64 + Procs uint16 + Pad uint16 + Totalhigh uint64 + Freehigh uint64 + Unit 
uint32 + _ [0]int8 + _ [4]byte +} + +type Ustat_t struct { + Tfree int32 + Tinode uint64 + Fname [6]int8 + Fpack [6]int8 + _ [4]byte +} + +type EpollEvent struct { + Events uint32 + _ int32 + Fd int32 + Pad int32 +} + +const ( + OPEN_TREE_CLOEXEC = 0x80000 +) + +const ( + POLLRDHUP = 0x2000 +) + +type Sigset_t struct { + Val [16]uint64 +} + +const _C__NSIG = 0x41 + +type Siginfo struct { + Signo int32 + Errno int32 + Code int32 + _ int32 + _ [112]byte +} + +type Termios struct { + Iflag uint32 + Oflag uint32 + Cflag uint32 + Lflag uint32 + Line uint8 + Cc [19]uint8 + Ispeed uint32 + Ospeed uint32 +} + +type Taskstats struct { + Version uint16 + Ac_exitcode uint32 + Ac_flag uint8 + Ac_nice uint8 + Cpu_count uint64 + Cpu_delay_total uint64 + Blkio_count uint64 + Blkio_delay_total uint64 + Swapin_count uint64 + Swapin_delay_total uint64 + Cpu_run_real_total uint64 + Cpu_run_virtual_total uint64 + Ac_comm [32]int8 + Ac_sched uint8 + Ac_pad [3]uint8 + _ [4]byte + Ac_uid uint32 + Ac_gid uint32 + Ac_pid uint32 + Ac_ppid uint32 + Ac_btime uint32 + Ac_etime uint64 + Ac_utime uint64 + Ac_stime uint64 + Ac_minflt uint64 + Ac_majflt uint64 + Coremem uint64 + Virtmem uint64 + Hiwater_rss uint64 + Hiwater_vm uint64 + Read_char uint64 + Write_char uint64 + Read_syscalls uint64 + Write_syscalls uint64 + Read_bytes uint64 + Write_bytes uint64 + Cancelled_write_bytes uint64 + Nvcsw uint64 + Nivcsw uint64 + Ac_utimescaled uint64 + Ac_stimescaled uint64 + Cpu_scaled_run_real_total uint64 + Freepages_count uint64 + Freepages_delay_total uint64 + Thrashing_count uint64 + Thrashing_delay_total uint64 + Ac_btime64 uint64 + Compact_count uint64 + Compact_delay_total uint64 + Ac_tgid uint32 + Ac_tgetime uint64 + Ac_exe_dev uint64 + Ac_exe_inode uint64 + Wpcopy_count uint64 + Wpcopy_delay_total uint64 +} + +type cpuMask uint64 + +const ( + _NCPUBITS = 0x40 +) + +const ( + CBitFieldMaskBit0 = 0x1 + CBitFieldMaskBit1 = 0x2 + CBitFieldMaskBit2 = 0x4 + CBitFieldMaskBit3 = 0x8 + CBitFieldMaskBit4 = 0x10 + CBitFieldMaskBit5 = 0x20 + CBitFieldMaskBit6 = 0x40 + CBitFieldMaskBit7 = 0x80 + CBitFieldMaskBit8 = 0x100 + CBitFieldMaskBit9 = 0x200 + CBitFieldMaskBit10 = 0x400 + CBitFieldMaskBit11 = 0x800 + CBitFieldMaskBit12 = 0x1000 + CBitFieldMaskBit13 = 0x2000 + CBitFieldMaskBit14 = 0x4000 + CBitFieldMaskBit15 = 0x8000 + CBitFieldMaskBit16 = 0x10000 + CBitFieldMaskBit17 = 0x20000 + CBitFieldMaskBit18 = 0x40000 + CBitFieldMaskBit19 = 0x80000 + CBitFieldMaskBit20 = 0x100000 + CBitFieldMaskBit21 = 0x200000 + CBitFieldMaskBit22 = 0x400000 + CBitFieldMaskBit23 = 0x800000 + CBitFieldMaskBit24 = 0x1000000 + CBitFieldMaskBit25 = 0x2000000 + CBitFieldMaskBit26 = 0x4000000 + CBitFieldMaskBit27 = 0x8000000 + CBitFieldMaskBit28 = 0x10000000 + CBitFieldMaskBit29 = 0x20000000 + CBitFieldMaskBit30 = 0x40000000 + CBitFieldMaskBit31 = 0x80000000 + CBitFieldMaskBit32 = 0x100000000 + CBitFieldMaskBit33 = 0x200000000 + CBitFieldMaskBit34 = 0x400000000 + CBitFieldMaskBit35 = 0x800000000 + CBitFieldMaskBit36 = 0x1000000000 + CBitFieldMaskBit37 = 0x2000000000 + CBitFieldMaskBit38 = 0x4000000000 + CBitFieldMaskBit39 = 0x8000000000 + CBitFieldMaskBit40 = 0x10000000000 + CBitFieldMaskBit41 = 0x20000000000 + CBitFieldMaskBit42 = 0x40000000000 + CBitFieldMaskBit43 = 0x80000000000 + CBitFieldMaskBit44 = 0x100000000000 + CBitFieldMaskBit45 = 0x200000000000 + CBitFieldMaskBit46 = 0x400000000000 + CBitFieldMaskBit47 = 0x800000000000 + CBitFieldMaskBit48 = 0x1000000000000 + CBitFieldMaskBit49 = 0x2000000000000 + CBitFieldMaskBit50 = 0x4000000000000 + 
CBitFieldMaskBit51 = 0x8000000000000 + CBitFieldMaskBit52 = 0x10000000000000 + CBitFieldMaskBit53 = 0x20000000000000 + CBitFieldMaskBit54 = 0x40000000000000 + CBitFieldMaskBit55 = 0x80000000000000 + CBitFieldMaskBit56 = 0x100000000000000 + CBitFieldMaskBit57 = 0x200000000000000 + CBitFieldMaskBit58 = 0x400000000000000 + CBitFieldMaskBit59 = 0x800000000000000 + CBitFieldMaskBit60 = 0x1000000000000000 + CBitFieldMaskBit61 = 0x2000000000000000 + CBitFieldMaskBit62 = 0x4000000000000000 + CBitFieldMaskBit63 = 0x8000000000000000 +) + +type SockaddrStorage struct { + Family uint16 + _ [118]int8 + _ uint64 +} + +type HDGeometry struct { + Heads uint8 + Sectors uint8 + Cylinders uint16 + Start uint64 +} + +type Statfs_t struct { + Type int64 + Bsize int64 + Blocks uint64 + Bfree uint64 + Bavail uint64 + Files uint64 + Ffree uint64 + Fsid Fsid + Namelen int64 + Frsize int64 + Flags int64 + Spare [4]int64 +} + +type TpacketHdr struct { + Status uint64 + Len uint32 + Snaplen uint32 + Mac uint16 + Net uint16 + Sec uint32 + Usec uint32 + _ [4]byte +} + +const ( + SizeofTpacketHdr = 0x20 +) + +type RTCPLLInfo struct { + Ctrl int32 + Value int32 + Max int32 + Min int32 + Posmult int32 + Negmult int32 + Clock int64 +} + +type BlkpgPartition struct { + Start int64 + Length int64 + Pno int32 + Devname [64]uint8 + Volname [64]uint8 + _ [4]byte +} + +const ( + BLKPG = 0x1269 +) + +type XDPUmemReg struct { + Addr uint64 + Len uint64 + Size uint32 + Headroom uint32 + Flags uint32 + _ [4]byte +} + +type CryptoUserAlg struct { + Name [64]int8 + Driver_name [64]int8 + Module_name [64]int8 + Type uint32 + Mask uint32 + Refcnt uint32 + Flags uint32 +} + +type CryptoStatAEAD struct { + Type [64]int8 + Encrypt_cnt uint64 + Encrypt_tlen uint64 + Decrypt_cnt uint64 + Decrypt_tlen uint64 + Err_cnt uint64 +} + +type CryptoStatAKCipher struct { + Type [64]int8 + Encrypt_cnt uint64 + Encrypt_tlen uint64 + Decrypt_cnt uint64 + Decrypt_tlen uint64 + Verify_cnt uint64 + Sign_cnt uint64 + Err_cnt uint64 +} + +type CryptoStatCipher struct { + Type [64]int8 + Encrypt_cnt uint64 + Encrypt_tlen uint64 + Decrypt_cnt uint64 + Decrypt_tlen uint64 + Err_cnt uint64 +} + +type CryptoStatCompress struct { + Type [64]int8 + Compress_cnt uint64 + Compress_tlen uint64 + Decompress_cnt uint64 + Decompress_tlen uint64 + Err_cnt uint64 +} + +type CryptoStatHash struct { + Type [64]int8 + Hash_cnt uint64 + Hash_tlen uint64 + Err_cnt uint64 +} + +type CryptoStatKPP struct { + Type [64]int8 + Setsecret_cnt uint64 + Generate_public_key_cnt uint64 + Compute_shared_secret_cnt uint64 + Err_cnt uint64 +} + +type CryptoStatRNG struct { + Type [64]int8 + Generate_cnt uint64 + Generate_tlen uint64 + Seed_cnt uint64 + Err_cnt uint64 +} + +type CryptoStatLarval struct { + Type [64]int8 +} + +type CryptoReportLarval struct { + Type [64]int8 +} + +type CryptoReportHash struct { + Type [64]int8 + Blocksize uint32 + Digestsize uint32 +} + +type CryptoReportCipher struct { + Type [64]int8 + Blocksize uint32 + Min_keysize uint32 + Max_keysize uint32 +} + +type CryptoReportBlkCipher struct { + Type [64]int8 + Geniv [64]int8 + Blocksize uint32 + Min_keysize uint32 + Max_keysize uint32 + Ivsize uint32 +} + +type CryptoReportAEAD struct { + Type [64]int8 + Geniv [64]int8 + Blocksize uint32 + Maxauthsize uint32 + Ivsize uint32 +} + +type CryptoReportComp struct { + Type [64]int8 +} + +type CryptoReportRNG struct { + Type [64]int8 + Seedsize uint32 +} + +type CryptoReportAKCipher struct { + Type [64]int8 +} + +type CryptoReportKPP struct { + Type [64]int8 +} + +type 
CryptoReportAcomp struct { + Type [64]int8 +} + +type LoopInfo struct { + Number int32 + Device uint32 + Inode uint64 + Rdevice uint32 + Offset int32 + Encrypt_type int32 + Encrypt_key_size int32 + Flags int32 + Name [64]int8 + Encrypt_key [32]uint8 + Init [2]uint64 + Reserved [4]int8 + _ [4]byte +} + +type TIPCSubscr struct { + Seq TIPCServiceRange + Timeout uint32 + Filter uint32 + Handle [8]int8 +} + +type TIPCSIOCLNReq struct { + Peer uint32 + Id uint32 + Linkname [68]int8 +} + +type TIPCSIOCNodeIDReq struct { + Peer uint32 + Id [16]int8 +} + +type PPSKInfo struct { + Assert_sequence uint32 + Clear_sequence uint32 + Assert_tu PPSKTime + Clear_tu PPSKTime + Current_mode int32 + _ [4]byte +} + +const ( + PPS_GETPARAMS = 0x800870a1 + PPS_SETPARAMS = 0x400870a2 + PPS_GETCAP = 0x800870a3 + PPS_FETCH = 0xc00870a4 +) + +const ( + PIDFD_NONBLOCK = 0x800 +) + +type SysvIpcPerm struct { + Key int32 + Uid uint32 + Gid uint32 + Cuid uint32 + Cgid uint32 + Mode uint32 + _ [0]uint8 + Seq uint16 + _ uint16 + _ uint64 + _ uint64 +} +type SysvShmDesc struct { + Perm SysvIpcPerm + Segsz uint64 + Atime int64 + Dtime int64 + Ctime int64 + Cpid int32 + Lpid int32 + Nattch uint64 + _ uint64 + _ uint64 +} diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_mips.go b/vendor/golang.org/x/sys/unix/ztypes_linux_mips.go index 5310f71ea..11426a301 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux_mips.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux_mips.go @@ -1,4 +1,4 @@ -// cgo -godefs -- -Wall -Werror -static -I/tmp/include /build/unix/linux/types.go | go run mkpost.go +// cgo -godefs -- -Wall -Werror -static -I/tmp/include linux/types.go | go run mkpost.go // Code generated by the command above; see README.md. DO NOT EDIT. //go:build mips && linux @@ -320,6 +320,13 @@ type Taskstats struct { Ac_btime64 uint64 Compact_count uint64 Compact_delay_total uint64 + Ac_tgid uint32 + _ [4]byte + Ac_tgetime uint64 + Ac_exe_dev uint64 + Ac_exe_inode uint64 + Wpcopy_count uint64 + Wpcopy_delay_total uint64 } type cpuMask uint32 diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_mips64.go b/vendor/golang.org/x/sys/unix/ztypes_linux_mips64.go index 219bbb126..ad1c3b3de 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux_mips64.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux_mips64.go @@ -1,4 +1,4 @@ -// cgo -godefs -- -Wall -Werror -static -I/tmp/include /build/unix/linux/types.go | go run mkpost.go +// cgo -godefs -- -Wall -Werror -static -I/tmp/include linux/types.go | go run mkpost.go // Code generated by the command above; see README.md. DO NOT EDIT. //go:build mips64 && linux @@ -320,6 +320,12 @@ type Taskstats struct { Ac_btime64 uint64 Compact_count uint64 Compact_delay_total uint64 + Ac_tgid uint32 + Ac_tgetime uint64 + Ac_exe_dev uint64 + Ac_exe_inode uint64 + Wpcopy_count uint64 + Wpcopy_delay_total uint64 } type cpuMask uint64 diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_mips64le.go b/vendor/golang.org/x/sys/unix/ztypes_linux_mips64le.go index be9432da5..15fd84e4d 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux_mips64le.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux_mips64le.go @@ -1,4 +1,4 @@ -// cgo -godefs -- -Wall -Werror -static -I/tmp/include /build/unix/linux/types.go | go run mkpost.go +// cgo -godefs -- -Wall -Werror -static -I/tmp/include linux/types.go | go run mkpost.go // Code generated by the command above; see README.md. DO NOT EDIT. 
//go:build mips64le && linux @@ -320,6 +320,12 @@ type Taskstats struct { Ac_btime64 uint64 Compact_count uint64 Compact_delay_total uint64 + Ac_tgid uint32 + Ac_tgetime uint64 + Ac_exe_dev uint64 + Ac_exe_inode uint64 + Wpcopy_count uint64 + Wpcopy_delay_total uint64 } type cpuMask uint64 diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_mipsle.go b/vendor/golang.org/x/sys/unix/ztypes_linux_mipsle.go index d0155a42e..49c49825a 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux_mipsle.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux_mipsle.go @@ -1,4 +1,4 @@ -// cgo -godefs -- -Wall -Werror -static -I/tmp/include /build/unix/linux/types.go | go run mkpost.go +// cgo -godefs -- -Wall -Werror -static -I/tmp/include linux/types.go | go run mkpost.go // Code generated by the command above; see README.md. DO NOT EDIT. //go:build mipsle && linux @@ -320,6 +320,13 @@ type Taskstats struct { Ac_btime64 uint64 Compact_count uint64 Compact_delay_total uint64 + Ac_tgid uint32 + _ [4]byte + Ac_tgetime uint64 + Ac_exe_dev uint64 + Ac_exe_inode uint64 + Wpcopy_count uint64 + Wpcopy_delay_total uint64 } type cpuMask uint32 diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_ppc.go b/vendor/golang.org/x/sys/unix/ztypes_linux_ppc.go index 01c17bcc6..cd36d0da2 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux_ppc.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux_ppc.go @@ -1,4 +1,4 @@ -// cgo -godefs -- -Wall -Werror -static -I/tmp/include /build/unix/linux/types.go | go run mkpost.go +// cgo -godefs -- -Wall -Werror -static -I/tmp/include linux/types.go | go run mkpost.go // Code generated by the command above; see README.md. DO NOT EDIT. //go:build ppc && linux @@ -327,6 +327,13 @@ type Taskstats struct { Ac_btime64 uint64 Compact_count uint64 Compact_delay_total uint64 + Ac_tgid uint32 + _ [4]byte + Ac_tgetime uint64 + Ac_exe_dev uint64 + Ac_exe_inode uint64 + Wpcopy_count uint64 + Wpcopy_delay_total uint64 } type cpuMask uint32 diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_ppc64.go b/vendor/golang.org/x/sys/unix/ztypes_linux_ppc64.go index 944a9c3c7..8c6fce039 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux_ppc64.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux_ppc64.go @@ -1,4 +1,4 @@ -// cgo -godefs -- -Wall -Werror -static -I/tmp/include /build/unix/linux/types.go | go run mkpost.go +// cgo -godefs -- -Wall -Werror -static -I/tmp/include linux/types.go | go run mkpost.go // Code generated by the command above; see README.md. DO NOT EDIT. //go:build ppc64 && linux @@ -327,6 +327,12 @@ type Taskstats struct { Ac_btime64 uint64 Compact_count uint64 Compact_delay_total uint64 + Ac_tgid uint32 + Ac_tgetime uint64 + Ac_exe_dev uint64 + Ac_exe_inode uint64 + Wpcopy_count uint64 + Wpcopy_delay_total uint64 } type cpuMask uint64 diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_ppc64le.go b/vendor/golang.org/x/sys/unix/ztypes_linux_ppc64le.go index 5d2c90e1c..20910f2ad 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux_ppc64le.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux_ppc64le.go @@ -1,4 +1,4 @@ -// cgo -godefs -- -Wall -Werror -static -I/tmp/include /build/unix/linux/types.go | go run mkpost.go +// cgo -godefs -- -Wall -Werror -static -I/tmp/include linux/types.go | go run mkpost.go // Code generated by the command above; see README.md. DO NOT EDIT. 
//go:build ppc64le && linux @@ -327,6 +327,12 @@ type Taskstats struct { Ac_btime64 uint64 Compact_count uint64 Compact_delay_total uint64 + Ac_tgid uint32 + Ac_tgetime uint64 + Ac_exe_dev uint64 + Ac_exe_inode uint64 + Wpcopy_count uint64 + Wpcopy_delay_total uint64 } type cpuMask uint64 diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_riscv64.go b/vendor/golang.org/x/sys/unix/ztypes_linux_riscv64.go index e173cb515..71b7b3331 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux_riscv64.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux_riscv64.go @@ -1,4 +1,4 @@ -// cgo -godefs -- -Wall -Werror -static -I/tmp/include /build/unix/linux/types.go | go run mkpost.go +// cgo -godefs -- -Wall -Werror -static -I/tmp/include linux/types.go | go run mkpost.go // Code generated by the command above; see README.md. DO NOT EDIT. //go:build riscv64 && linux @@ -345,6 +345,12 @@ type Taskstats struct { Ac_btime64 uint64 Compact_count uint64 Compact_delay_total uint64 + Ac_tgid uint32 + Ac_tgetime uint64 + Ac_exe_dev uint64 + Ac_exe_inode uint64 + Wpcopy_count uint64 + Wpcopy_delay_total uint64 } type cpuMask uint64 diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_s390x.go b/vendor/golang.org/x/sys/unix/ztypes_linux_s390x.go index 6106715d5..71184cc2c 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux_s390x.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux_s390x.go @@ -1,4 +1,4 @@ -// cgo -godefs -- -Wall -Werror -static -I/tmp/include -fsigned-char /build/unix/linux/types.go | go run mkpost.go +// cgo -godefs -- -Wall -Werror -static -I/tmp/include -fsigned-char linux/types.go | go run mkpost.go // Code generated by the command above; see README.md. DO NOT EDIT. //go:build s390x && linux @@ -340,6 +340,12 @@ type Taskstats struct { Ac_btime64 uint64 Compact_count uint64 Compact_delay_total uint64 + Ac_tgid uint32 + Ac_tgetime uint64 + Ac_exe_dev uint64 + Ac_exe_inode uint64 + Wpcopy_count uint64 + Wpcopy_delay_total uint64 } type cpuMask uint64 diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_sparc64.go b/vendor/golang.org/x/sys/unix/ztypes_linux_sparc64.go index ca7b37b4b..06156285d 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux_sparc64.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux_sparc64.go @@ -1,4 +1,4 @@ -// cgo -godefs -- -Wall -Werror -static -I/tmp/include /build/unix/linux/types.go | go run mkpost.go +// cgo -godefs -- -Wall -Werror -static -I/tmp/include linux/types.go | go run mkpost.go // Code generated by the command above; see README.md. DO NOT EDIT. 
//go:build sparc64 && linux @@ -322,6 +322,12 @@ type Taskstats struct { Ac_btime64 uint64 Compact_count uint64 Compact_delay_total uint64 + Ac_tgid uint32 + Ac_tgetime uint64 + Ac_exe_dev uint64 + Ac_exe_inode uint64 + Wpcopy_count uint64 + Wpcopy_delay_total uint64 } type cpuMask uint64 diff --git a/vendor/golang.org/x/sys/unix/ztypes_openbsd_386.go b/vendor/golang.org/x/sys/unix/ztypes_openbsd_386.go index baf5fe650..2ed718ca0 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_openbsd_386.go +++ b/vendor/golang.org/x/sys/unix/ztypes_openbsd_386.go @@ -94,10 +94,10 @@ type Statfs_t struct { F_namemax uint32 F_owner uint32 F_ctime uint64 - F_fstypename [16]int8 - F_mntonname [90]int8 - F_mntfromname [90]int8 - F_mntfromspec [90]int8 + F_fstypename [16]byte + F_mntonname [90]byte + F_mntfromname [90]byte + F_mntfromspec [90]byte Pad_cgo_0 [2]byte Mount_info [160]byte } diff --git a/vendor/golang.org/x/sys/unix/ztypes_openbsd_amd64.go b/vendor/golang.org/x/sys/unix/ztypes_openbsd_amd64.go index e21ae8ecf..b4fb97ebe 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_openbsd_amd64.go +++ b/vendor/golang.org/x/sys/unix/ztypes_openbsd_amd64.go @@ -96,10 +96,10 @@ type Statfs_t struct { F_namemax uint32 F_owner uint32 F_ctime uint64 - F_fstypename [16]int8 - F_mntonname [90]int8 - F_mntfromname [90]int8 - F_mntfromspec [90]int8 + F_fstypename [16]byte + F_mntonname [90]byte + F_mntfromname [90]byte + F_mntfromspec [90]byte _ [2]byte Mount_info [160]byte } diff --git a/vendor/golang.org/x/sys/unix/ztypes_openbsd_arm.go b/vendor/golang.org/x/sys/unix/ztypes_openbsd_arm.go index f190651cd..2c4675040 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_openbsd_arm.go +++ b/vendor/golang.org/x/sys/unix/ztypes_openbsd_arm.go @@ -98,10 +98,10 @@ type Statfs_t struct { F_namemax uint32 F_owner uint32 F_ctime uint64 - F_fstypename [16]int8 - F_mntonname [90]int8 - F_mntfromname [90]int8 - F_mntfromspec [90]int8 + F_fstypename [16]byte + F_mntonname [90]byte + F_mntfromname [90]byte + F_mntfromspec [90]byte _ [2]byte Mount_info [160]byte } diff --git a/vendor/golang.org/x/sys/unix/ztypes_openbsd_arm64.go b/vendor/golang.org/x/sys/unix/ztypes_openbsd_arm64.go index 84747c582..ddee04514 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_openbsd_arm64.go +++ b/vendor/golang.org/x/sys/unix/ztypes_openbsd_arm64.go @@ -94,10 +94,10 @@ type Statfs_t struct { F_namemax uint32 F_owner uint32 F_ctime uint64 - F_fstypename [16]int8 - F_mntonname [90]int8 - F_mntfromname [90]int8 - F_mntfromspec [90]int8 + F_fstypename [16]byte + F_mntonname [90]byte + F_mntfromname [90]byte + F_mntfromspec [90]byte _ [2]byte Mount_info [160]byte } diff --git a/vendor/golang.org/x/sys/unix/ztypes_openbsd_mips64.go b/vendor/golang.org/x/sys/unix/ztypes_openbsd_mips64.go index ac5c8b637..eb13d4e8b 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_openbsd_mips64.go +++ b/vendor/golang.org/x/sys/unix/ztypes_openbsd_mips64.go @@ -94,10 +94,10 @@ type Statfs_t struct { F_namemax uint32 F_owner uint32 F_ctime uint64 - F_fstypename [16]int8 - F_mntonname [90]int8 - F_mntfromname [90]int8 - F_mntfromspec [90]int8 + F_fstypename [16]byte + F_mntonname [90]byte + F_mntfromname [90]byte + F_mntfromspec [90]byte _ [2]byte Mount_info [160]byte } diff --git a/vendor/golang.org/x/sys/unix/ztypes_solaris_amd64.go b/vendor/golang.org/x/sys/unix/ztypes_solaris_amd64.go index ad4aad279..c1a9b83ad 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_solaris_amd64.go +++ b/vendor/golang.org/x/sys/unix/ztypes_solaris_amd64.go @@ -178,7 +178,7 @@ type Linger struct { } 
type Iovec struct { - Base *int8 + Base *byte Len uint64 } diff --git a/vendor/golang.org/x/sys/windows/exec_windows.go b/vendor/golang.org/x/sys/windows/exec_windows.go index 855698bb2..75980fd44 100644 --- a/vendor/golang.org/x/sys/windows/exec_windows.go +++ b/vendor/golang.org/x/sys/windows/exec_windows.go @@ -15,11 +15,11 @@ import ( // in http://msdn.microsoft.com/en-us/library/ms880421. // This function returns "" (2 double quotes) if s is empty. // Alternatively, these transformations are done: -// - every back slash (\) is doubled, but only if immediately -// followed by double quote ("); -// - every double quote (") is escaped by back slash (\); -// - finally, s is wrapped with double quotes (arg -> "arg"), -// but only if there is space or tab inside s. +// - every back slash (\) is doubled, but only if immediately +// followed by double quote ("); +// - every double quote (") is escaped by back slash (\); +// - finally, s is wrapped with double quotes (arg -> "arg"), +// but only if there is space or tab inside s. func EscapeArg(s string) string { if len(s) == 0 { return "\"\"" diff --git a/vendor/golang.org/x/sys/windows/syscall_windows.go b/vendor/golang.org/x/sys/windows/syscall_windows.go index ce3075c45..636e5de60 100644 --- a/vendor/golang.org/x/sys/windows/syscall_windows.go +++ b/vendor/golang.org/x/sys/windows/syscall_windows.go @@ -623,7 +623,6 @@ var ( func getStdHandle(stdhandle uint32) (fd Handle) { r, _ := GetStdHandle(stdhandle) - CloseOnExec(r) return r } diff --git a/vendor/golang.org/x/xerrors/doc.go b/vendor/golang.org/x/xerrors/doc.go index eef99d9d5..2ef99f5a8 100644 --- a/vendor/golang.org/x/xerrors/doc.go +++ b/vendor/golang.org/x/xerrors/doc.go @@ -5,7 +5,8 @@ // Package xerrors implements functions to manipulate errors. // // This package is based on the Go 2 proposal for error values: -// https://golang.org/design/29934-error-values +// +// https://golang.org/design/29934-error-values // // These functions were incorporated into the standard library's errors package // in Go 1.13: diff --git a/vendor/golang.org/x/xerrors/fmt.go b/vendor/golang.org/x/xerrors/fmt.go index 829862ddf..27a5d70bd 100644 --- a/vendor/golang.org/x/xerrors/fmt.go +++ b/vendor/golang.org/x/xerrors/fmt.go @@ -33,6 +33,9 @@ const percentBangString = "%!" // It is invalid to include more than one %w verb or to supply it with an // operand that does not implement the error interface. The %w verb is otherwise // a synonym for %v. +// +// Note that as of Go 1.13, the fmt.Errorf function will do error formatting, +// but it will not capture a stack backtrace. func Errorf(format string, a ...interface{}) error { format = formatPlusW(format) // Support a ": %[wsv]" suffix, which works well with xerrors.Formatter. diff --git a/vendor/golang.org/x/xerrors/wrap.go b/vendor/golang.org/x/xerrors/wrap.go index 9a3b51037..9842758ca 100644 --- a/vendor/golang.org/x/xerrors/wrap.go +++ b/vendor/golang.org/x/xerrors/wrap.go @@ -35,6 +35,8 @@ func (e noWrapper) FormatError(p Printer) (next error) { // Unwrap returns the result of calling the Unwrap method on err, if err implements // Unwrap. Otherwise, Unwrap returns nil. +// +// Deprecated: As of Go 1.13, use errors.Unwrap instead. func Unwrap(err error) error { u, ok := err.(Wrapper) if !ok { @@ -47,6 +49,8 @@ func Unwrap(err error) error { // // An error is considered to match a target if it is equal to that target or if // it implements a method Is(error) bool such that Is(target) returns true. 
+// +// Deprecated: As of Go 1.13, use errors.Is instead. func Is(err, target error) bool { if target == nil { return err == target @@ -77,6 +81,8 @@ func Is(err, target error) bool { // // The As method should set the target to its value and return true if err // matches the type to which target points. +// +// Deprecated: As of Go 1.13, use errors.As instead. func As(err error, target interface{}) bool { if target == nil { panic("errors: target cannot be nil") diff --git a/vendor/google.golang.org/api/dns/v1/dns-api.json b/vendor/google.golang.org/api/dns/v1/dns-api.json index 08f8810b5..f0e24c15c 100644 --- a/vendor/google.golang.org/api/dns/v1/dns-api.json +++ b/vendor/google.golang.org/api/dns/v1/dns-api.json @@ -588,6 +588,37 @@ "https://www.googleapis.com/auth/ndev.clouddns.readwrite" ] }, + "getIamPolicy": { + "description": "Gets the access control policy for a resource. Returns an empty policy if the resource exists and does not have a policy set.", + "flatPath": "dns/v1/projects/{projectsId}/managedZones/{managedZonesId}:getIamPolicy", + "httpMethod": "POST", + "id": "dns.managedZones.getIamPolicy", + "parameterOrder": [ + "resource" + ], + "parameters": { + "resource": { + "description": "REQUIRED: The resource for which the policy is being requested. See [Resource names](https://cloud.google.com/apis/design/resource_names) for the appropriate value for this field.", + "location": "path", + "pattern": "^projects/[^/]+/managedZones/[^/]+$", + "required": true, + "type": "string" + } + }, + "path": "dns/v1/{+resource}:getIamPolicy", + "request": { + "$ref": "GoogleIamV1GetIamPolicyRequest" + }, + "response": { + "$ref": "GoogleIamV1Policy" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/cloud-platform.read-only", + "https://www.googleapis.com/auth/ndev.clouddns.readonly", + "https://www.googleapis.com/auth/ndev.clouddns.readwrite" + ] + }, "list": { "description": "Enumerates ManagedZones that have been created but not yet deleted.", "flatPath": "dns/v1/projects/{project}/managedZones", @@ -671,6 +702,66 @@ "https://www.googleapis.com/auth/ndev.clouddns.readwrite" ] }, + "setIamPolicy": { + "description": "Sets the access control policy on the specified resource. Replaces any existing policy. Can return `NOT_FOUND`, `INVALID_ARGUMENT`, and `PERMISSION_DENIED` errors.", + "flatPath": "dns/v1/projects/{projectsId}/managedZones/{managedZonesId}:setIamPolicy", + "httpMethod": "POST", + "id": "dns.managedZones.setIamPolicy", + "parameterOrder": [ + "resource" + ], + "parameters": { + "resource": { + "description": "REQUIRED: The resource for which the policy is being specified. See [Resource names](https://cloud.google.com/apis/design/resource_names) for the appropriate value for this field.", + "location": "path", + "pattern": "^projects/[^/]+/managedZones/[^/]+$", + "required": true, + "type": "string" + } + }, + "path": "dns/v1/{+resource}:setIamPolicy", + "request": { + "$ref": "GoogleIamV1SetIamPolicyRequest" + }, + "response": { + "$ref": "GoogleIamV1Policy" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/ndev.clouddns.readwrite" + ] + }, + "testIamPermissions": { + "description": "Returns permissions that a caller has on the specified resource. If the resource does not exist, this will return an empty set of permissions, not a `NOT_FOUND` error. 
Note: This operation is designed to be used for building permission-aware UIs and command-line tools, not for authorization checking. This operation may \"fail open\" without warning.", + "flatPath": "dns/v1/projects/{projectsId}/managedZones/{managedZonesId}:testIamPermissions", + "httpMethod": "POST", + "id": "dns.managedZones.testIamPermissions", + "parameterOrder": [ + "resource" + ], + "parameters": { + "resource": { + "description": "REQUIRED: The resource for which the policy detail is being requested. See [Resource names](https://cloud.google.com/apis/design/resource_names) for the appropriate value for this field.", + "location": "path", + "pattern": "^projects/[^/]+/managedZones/[^/]+$", + "required": true, + "type": "string" + } + }, + "path": "dns/v1/{+resource}:testIamPermissions", + "request": { + "$ref": "GoogleIamV1TestIamPermissionsRequest" + }, + "response": { + "$ref": "GoogleIamV1TestIamPermissionsResponse" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/cloud-platform.read-only", + "https://www.googleapis.com/auth/ndev.clouddns.readonly", + "https://www.googleapis.com/auth/ndev.clouddns.readwrite" + ] + }, "update": { "description": "Updates an existing ManagedZone.", "flatPath": "dns/v1/projects/{project}/managedZones/{managedZone}", @@ -1233,135 +1324,633 @@ ] } } - } - }, - "revision": "20211103", - "rootUrl": "https://dns.googleapis.com/", - "schemas": { - "Change": { - "description": "A Change represents a set of ResourceRecordSet additions and deletions applied atomically to a ManagedZone. ResourceRecordSets within a ManagedZone are modified by creating a new Change element in the Changes collection. In turn the Changes collection also records the past modifications to the ResourceRecordSets in a ManagedZone. The current state of the ManagedZone is the sum effect of applying all Change elements in the Changes collection in sequence.", - "id": "Change", - "properties": { - "additions": { - "description": "Which ResourceRecordSets to add?", - "items": { - "$ref": "ResourceRecordSet" + }, + "responsePolicies": { + "methods": { + "create": { + "description": "Creates a new Response Policy", + "flatPath": "dns/v1/projects/{project}/responsePolicies", + "httpMethod": "POST", + "id": "dns.responsePolicies.create", + "parameterOrder": [ + "project" + ], + "parameters": { + "clientOperationId": { + "description": "For mutating operation requests only. An optional identifier specified by the client. Must be unique for operation resources in the Operations collection.", + "location": "query", + "type": "string" + }, + "project": { + "description": "Identifies the project addressed by this request.", + "location": "path", + "required": true, + "type": "string" + } }, - "type": "array" - }, - "deletions": { - "description": "Which ResourceRecordSets to remove? Must match existing data exactly.", - "items": { - "$ref": "ResourceRecordSet" + "path": "dns/v1/projects/{project}/responsePolicies", + "request": { + "$ref": "ResponsePolicy" }, - "type": "array" - }, - "id": { - "description": "Unique identifier for the resource; defined by the server (output only).", - "type": "string" - }, - "isServing": { - "description": "If the DNS queries for the zone will be served.", - "type": "boolean" - }, - "kind": { - "default": "dns#change", - "type": "string" - }, - "startTime": { - "description": "The time that this operation was started by the server (output only). 
This is in RFC3339 text format.", - "type": "string" + "response": { + "$ref": "ResponsePolicy" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/ndev.clouddns.readwrite" + ] }, - "status": { - "description": "Status of the operation (output only). A status of \"done\" means that the request to update the authoritative servers has been sent, but the servers might not be updated yet.", - "enum": [ - "pending", - "done" - ], - "enumDescriptions": [ - "", - "" + "delete": { + "description": "Deletes a previously created Response Policy. Fails if the response policy is non-empty or still being referenced by a network.", + "flatPath": "dns/v1/projects/{project}/responsePolicies/{responsePolicy}", + "httpMethod": "DELETE", + "id": "dns.responsePolicies.delete", + "parameterOrder": [ + "project", + "responsePolicy" ], - "type": "string" - } - }, - "type": "object" - }, - "ChangesListResponse": { - "description": "The response to a request to enumerate Changes to a ResourceRecordSets collection.", - "id": "ChangesListResponse", - "properties": { - "changes": { - "description": "The requested changes.", - "items": { - "$ref": "Change" + "parameters": { + "clientOperationId": { + "description": "For mutating operation requests only. An optional identifier specified by the client. Must be unique for operation resources in the Operations collection.", + "location": "query", + "type": "string" + }, + "project": { + "description": "Identifies the project addressed by this request.", + "location": "path", + "required": true, + "type": "string" + }, + "responsePolicy": { + "description": "User assigned name of the Response Policy addressed by this request.", + "location": "path", + "required": true, + "type": "string" + } }, - "type": "array" - }, - "header": { - "$ref": "ResponseHeader" - }, - "kind": { - "default": "dns#changesListResponse", - "description": "Type of resource.", - "type": "string" + "path": "dns/v1/projects/{project}/responsePolicies/{responsePolicy}", + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/ndev.clouddns.readwrite" + ] }, - "nextPageToken": { - "description": "The presence of this field indicates that there exist more results following your last page of results in pagination order. To fetch them, make another list request using this value as your pagination token. This lets you retrieve the complete contents of even very large collections one page at a time. However, if the contents of the collection change between the first and last paginated list request, the set of all elements returned are an inconsistent view of the collection. You cannot retrieve a \"snapshot\" of collections larger than the maximum page size.", - "type": "string" - } - }, - "type": "object" - }, - "DnsKey": { - "description": "A DNSSEC key pair.", - "id": "DnsKey", - "properties": { - "algorithm": { - "description": "String mnemonic specifying the DNSSEC algorithm of this key. 
Immutable after creation time.", - "enum": [ - "rsasha1", - "rsasha256", - "rsasha512", - "ecdsap256sha256", - "ecdsap384sha384" - ], - "enumDescriptions": [ - "", - "", - "", - "", - "" + "get": { + "description": "Fetches the representation of an existing Response Policy.", + "flatPath": "dns/v1/projects/{project}/responsePolicies/{responsePolicy}", + "httpMethod": "GET", + "id": "dns.responsePolicies.get", + "parameterOrder": [ + "project", + "responsePolicy" ], - "type": "string" - }, - "creationTime": { - "description": "The time that this resource was created in the control plane. This is in RFC3339 text format. Output only.", - "type": "string" - }, - "description": { - "description": "A mutable string of at most 1024 characters associated with this resource for the user's convenience. Has no effect on the resource's function.", - "type": "string" - }, - "digests": { - "description": "Cryptographic hashes of the DNSKEY resource record associated with this DnsKey. These digests are needed to construct a DS record that points at this DNS key. Output only.", - "items": { - "$ref": "DnsKeyDigest" + "parameters": { + "clientOperationId": { + "description": "For mutating operation requests only. An optional identifier specified by the client. Must be unique for operation resources in the Operations collection.", + "location": "query", + "type": "string" + }, + "project": { + "description": "Identifies the project addressed by this request.", + "location": "path", + "required": true, + "type": "string" + }, + "responsePolicy": { + "description": "User assigned name of the Response Policy addressed by this request.", + "location": "path", + "required": true, + "type": "string" + } }, - "type": "array" - }, - "id": { - "description": "Unique identifier for the resource; defined by the server (output only).", - "type": "string" - }, - "isActive": { - "description": "Active keys are used to sign subsequent changes to the ManagedZone. Inactive keys are still present as DNSKEY Resource Records for the use of resolvers validating existing signatures.", - "type": "boolean" - }, - "keyLength": { - "description": "Length of the key in bits. Specified at creation time, and then immutable.", - "format": "uint32", - "type": "integer" + "path": "dns/v1/projects/{project}/responsePolicies/{responsePolicy}", + "response": { + "$ref": "ResponsePolicy" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/cloud-platform.read-only", + "https://www.googleapis.com/auth/ndev.clouddns.readonly", + "https://www.googleapis.com/auth/ndev.clouddns.readwrite" + ] + }, + "list": { + "description": "Enumerates all Response Policies associated with a project.", + "flatPath": "dns/v1/projects/{project}/responsePolicies", + "httpMethod": "GET", + "id": "dns.responsePolicies.list", + "parameterOrder": [ + "project" + ], + "parameters": { + "maxResults": { + "description": "Optional. Maximum number of results to be returned. If unspecified, the server decides how many results to return.", + "format": "int32", + "location": "query", + "type": "integer" + }, + "pageToken": { + "description": "Optional. A tag returned by a previous list request that was truncated. 
Use this parameter to continue a previous list request.", + "location": "query", + "type": "string" + }, + "project": { + "description": "Identifies the project addressed by this request.", + "location": "path", + "required": true, + "type": "string" + } + }, + "path": "dns/v1/projects/{project}/responsePolicies", + "response": { + "$ref": "ResponsePoliciesListResponse" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/cloud-platform.read-only", + "https://www.googleapis.com/auth/ndev.clouddns.readonly", + "https://www.googleapis.com/auth/ndev.clouddns.readwrite" + ] + }, + "patch": { + "description": "Applies a partial update to an existing Response Policy.", + "flatPath": "dns/v1/projects/{project}/responsePolicies/{responsePolicy}", + "httpMethod": "PATCH", + "id": "dns.responsePolicies.patch", + "parameterOrder": [ + "project", + "responsePolicy" + ], + "parameters": { + "clientOperationId": { + "description": "For mutating operation requests only. An optional identifier specified by the client. Must be unique for operation resources in the Operations collection.", + "location": "query", + "type": "string" + }, + "project": { + "description": "Identifies the project addressed by this request.", + "location": "path", + "required": true, + "type": "string" + }, + "responsePolicy": { + "description": "User assigned name of the Respones Policy addressed by this request.", + "location": "path", + "required": true, + "type": "string" + } + }, + "path": "dns/v1/projects/{project}/responsePolicies/{responsePolicy}", + "request": { + "$ref": "ResponsePolicy" + }, + "response": { + "$ref": "ResponsePoliciesPatchResponse" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/ndev.clouddns.readwrite" + ] + }, + "update": { + "description": "Updates an existing Response Policy.", + "flatPath": "dns/v1/projects/{project}/responsePolicies/{responsePolicy}", + "httpMethod": "PUT", + "id": "dns.responsePolicies.update", + "parameterOrder": [ + "project", + "responsePolicy" + ], + "parameters": { + "clientOperationId": { + "description": "For mutating operation requests only. An optional identifier specified by the client. Must be unique for operation resources in the Operations collection.", + "location": "query", + "type": "string" + }, + "project": { + "description": "Identifies the project addressed by this request.", + "location": "path", + "required": true, + "type": "string" + }, + "responsePolicy": { + "description": "User assigned name of the Response Policy addressed by this request.", + "location": "path", + "required": true, + "type": "string" + } + }, + "path": "dns/v1/projects/{project}/responsePolicies/{responsePolicy}", + "request": { + "$ref": "ResponsePolicy" + }, + "response": { + "$ref": "ResponsePoliciesUpdateResponse" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/ndev.clouddns.readwrite" + ] + } + } + }, + "responsePolicyRules": { + "methods": { + "create": { + "description": "Creates a new Response Policy Rule.", + "flatPath": "dns/v1/projects/{project}/responsePolicies/{responsePolicy}/rules", + "httpMethod": "POST", + "id": "dns.responsePolicyRules.create", + "parameterOrder": [ + "project", + "responsePolicy" + ], + "parameters": { + "clientOperationId": { + "description": "For mutating operation requests only. An optional identifier specified by the client. 
Must be unique for operation resources in the Operations collection.", + "location": "query", + "type": "string" + }, + "project": { + "description": "Identifies the project addressed by this request.", + "location": "path", + "required": true, + "type": "string" + }, + "responsePolicy": { + "description": "User assigned name of the Response Policy containing the Response Policy Rule.", + "location": "path", + "required": true, + "type": "string" + } + }, + "path": "dns/v1/projects/{project}/responsePolicies/{responsePolicy}/rules", + "request": { + "$ref": "ResponsePolicyRule" + }, + "response": { + "$ref": "ResponsePolicyRule" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/ndev.clouddns.readwrite" + ] + }, + "delete": { + "description": "Deletes a previously created Response Policy Rule.", + "flatPath": "dns/v1/projects/{project}/responsePolicies/{responsePolicy}/rules/{responsePolicyRule}", + "httpMethod": "DELETE", + "id": "dns.responsePolicyRules.delete", + "parameterOrder": [ + "project", + "responsePolicy", + "responsePolicyRule" + ], + "parameters": { + "clientOperationId": { + "description": "For mutating operation requests only. An optional identifier specified by the client. Must be unique for operation resources in the Operations collection.", + "location": "query", + "type": "string" + }, + "project": { + "description": "Identifies the project addressed by this request.", + "location": "path", + "required": true, + "type": "string" + }, + "responsePolicy": { + "description": "User assigned name of the Response Policy containing the Response Policy Rule.", + "location": "path", + "required": true, + "type": "string" + }, + "responsePolicyRule": { + "description": "User assigned name of the Response Policy Rule addressed by this request.", + "location": "path", + "required": true, + "type": "string" + } + }, + "path": "dns/v1/projects/{project}/responsePolicies/{responsePolicy}/rules/{responsePolicyRule}", + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/ndev.clouddns.readwrite" + ] + }, + "get": { + "description": "Fetches the representation of an existing Response Policy Rule.", + "flatPath": "dns/v1/projects/{project}/responsePolicies/{responsePolicy}/rules/{responsePolicyRule}", + "httpMethod": "GET", + "id": "dns.responsePolicyRules.get", + "parameterOrder": [ + "project", + "responsePolicy", + "responsePolicyRule" + ], + "parameters": { + "clientOperationId": { + "description": "For mutating operation requests only. An optional identifier specified by the client. 
Must be unique for operation resources in the Operations collection.", + "location": "query", + "type": "string" + }, + "project": { + "description": "Identifies the project addressed by this request.", + "location": "path", + "required": true, + "type": "string" + }, + "responsePolicy": { + "description": "User assigned name of the Response Policy containing the Response Policy Rule.", + "location": "path", + "required": true, + "type": "string" + }, + "responsePolicyRule": { + "description": "User assigned name of the Response Policy Rule addressed by this request.", + "location": "path", + "required": true, + "type": "string" + } + }, + "path": "dns/v1/projects/{project}/responsePolicies/{responsePolicy}/rules/{responsePolicyRule}", + "response": { + "$ref": "ResponsePolicyRule" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/cloud-platform.read-only", + "https://www.googleapis.com/auth/ndev.clouddns.readonly", + "https://www.googleapis.com/auth/ndev.clouddns.readwrite" + ] + }, + "list": { + "description": "Enumerates all Response Policy Rules associated with a project.", + "flatPath": "dns/v1/projects/{project}/responsePolicies/{responsePolicy}/rules", + "httpMethod": "GET", + "id": "dns.responsePolicyRules.list", + "parameterOrder": [ + "project", + "responsePolicy" + ], + "parameters": { + "maxResults": { + "description": "Optional. Maximum number of results to be returned. If unspecified, the server decides how many results to return.", + "format": "int32", + "location": "query", + "type": "integer" + }, + "pageToken": { + "description": "Optional. A tag returned by a previous list request that was truncated. Use this parameter to continue a previous list request.", + "location": "query", + "type": "string" + }, + "project": { + "description": "Identifies the project addressed by this request.", + "location": "path", + "required": true, + "type": "string" + }, + "responsePolicy": { + "description": "User assigned name of the Response Policy to list.", + "location": "path", + "required": true, + "type": "string" + } + }, + "path": "dns/v1/projects/{project}/responsePolicies/{responsePolicy}/rules", + "response": { + "$ref": "ResponsePolicyRulesListResponse" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/cloud-platform.read-only", + "https://www.googleapis.com/auth/ndev.clouddns.readonly", + "https://www.googleapis.com/auth/ndev.clouddns.readwrite" + ] + }, + "patch": { + "description": "Applies a partial update to an existing Response Policy Rule.", + "flatPath": "dns/v1/projects/{project}/responsePolicies/{responsePolicy}/rules/{responsePolicyRule}", + "httpMethod": "PATCH", + "id": "dns.responsePolicyRules.patch", + "parameterOrder": [ + "project", + "responsePolicy", + "responsePolicyRule" + ], + "parameters": { + "clientOperationId": { + "description": "For mutating operation requests only. An optional identifier specified by the client. 
Must be unique for operation resources in the Operations collection.", + "location": "query", + "type": "string" + }, + "project": { + "description": "Identifies the project addressed by this request.", + "location": "path", + "required": true, + "type": "string" + }, + "responsePolicy": { + "description": "User assigned name of the Response Policy containing the Response Policy Rule.", + "location": "path", + "required": true, + "type": "string" + }, + "responsePolicyRule": { + "description": "User assigned name of the Response Policy Rule addressed by this request.", + "location": "path", + "required": true, + "type": "string" + } + }, + "path": "dns/v1/projects/{project}/responsePolicies/{responsePolicy}/rules/{responsePolicyRule}", + "request": { + "$ref": "ResponsePolicyRule" + }, + "response": { + "$ref": "ResponsePolicyRulesPatchResponse" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/ndev.clouddns.readwrite" + ] + }, + "update": { + "description": "Updates an existing Response Policy Rule.", + "flatPath": "dns/v1/projects/{project}/responsePolicies/{responsePolicy}/rules/{responsePolicyRule}", + "httpMethod": "PUT", + "id": "dns.responsePolicyRules.update", + "parameterOrder": [ + "project", + "responsePolicy", + "responsePolicyRule" + ], + "parameters": { + "clientOperationId": { + "description": "For mutating operation requests only. An optional identifier specified by the client. Must be unique for operation resources in the Operations collection.", + "location": "query", + "type": "string" + }, + "project": { + "description": "Identifies the project addressed by this request.", + "location": "path", + "required": true, + "type": "string" + }, + "responsePolicy": { + "description": "User assigned name of the Response Policy containing the Response Policy Rule.", + "location": "path", + "required": true, + "type": "string" + }, + "responsePolicyRule": { + "description": "User assigned name of the Response Policy Rule addressed by this request.", + "location": "path", + "required": true, + "type": "string" + } + }, + "path": "dns/v1/projects/{project}/responsePolicies/{responsePolicy}/rules/{responsePolicyRule}", + "request": { + "$ref": "ResponsePolicyRule" + }, + "response": { + "$ref": "ResponsePolicyRulesUpdateResponse" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/ndev.clouddns.readwrite" + ] + } + } + } + }, + "revision": "20220630", + "rootUrl": "https://dns.googleapis.com/", + "schemas": { + "Change": { + "description": "A Change represents a set of ResourceRecordSet additions and deletions applied atomically to a ManagedZone. ResourceRecordSets within a ManagedZone are modified by creating a new Change element in the Changes collection. In turn the Changes collection also records the past modifications to the ResourceRecordSets in a ManagedZone. The current state of the ManagedZone is the sum effect of applying all Change elements in the Changes collection in sequence.", + "id": "Change", + "properties": { + "additions": { + "description": "Which ResourceRecordSets to add?", + "items": { + "$ref": "ResourceRecordSet" + }, + "type": "array" + }, + "deletions": { + "description": "Which ResourceRecordSets to remove? 
Must match existing data exactly.", + "items": { + "$ref": "ResourceRecordSet" + }, + "type": "array" + }, + "id": { + "description": "Unique identifier for the resource; defined by the server (output only).", + "type": "string" + }, + "isServing": { + "description": "If the DNS queries for the zone will be served.", + "type": "boolean" + }, + "kind": { + "default": "dns#change", + "type": "string" + }, + "startTime": { + "description": "The time that this operation was started by the server (output only). This is in RFC3339 text format.", + "type": "string" + }, + "status": { + "description": "Status of the operation (output only). A status of \"done\" means that the request to update the authoritative servers has been sent, but the servers might not be updated yet.", + "enum": [ + "pending", + "done" + ], + "enumDescriptions": [ + "", + "" + ], + "type": "string" + } + }, + "type": "object" + }, + "ChangesListResponse": { + "description": "The response to a request to enumerate Changes to a ResourceRecordSets collection.", + "id": "ChangesListResponse", + "properties": { + "changes": { + "description": "The requested changes.", + "items": { + "$ref": "Change" + }, + "type": "array" + }, + "header": { + "$ref": "ResponseHeader" + }, + "kind": { + "default": "dns#changesListResponse", + "description": "Type of resource.", + "type": "string" + }, + "nextPageToken": { + "description": "The presence of this field indicates that there exist more results following your last page of results in pagination order. To fetch them, make another list request using this value as your pagination token. This lets you retrieve the complete contents of even very large collections one page at a time. However, if the contents of the collection change between the first and last paginated list request, the set of all elements returned are an inconsistent view of the collection. You cannot retrieve a \"snapshot\" of collections larger than the maximum page size.", + "type": "string" + } + }, + "type": "object" + }, + "DnsKey": { + "description": "A DNSSEC key pair.", + "id": "DnsKey", + "properties": { + "algorithm": { + "description": "String mnemonic specifying the DNSSEC algorithm of this key. Immutable after creation time.", + "enum": [ + "rsasha1", + "rsasha256", + "rsasha512", + "ecdsap256sha256", + "ecdsap384sha384" + ], + "enumDescriptions": [ + "", + "", + "", + "", + "" + ], + "type": "string" + }, + "creationTime": { + "description": "The time that this resource was created in the control plane. This is in RFC3339 text format. Output only.", + "type": "string" + }, + "description": { + "description": "A mutable string of at most 1024 characters associated with this resource for the user's convenience. Has no effect on the resource's function.", + "type": "string" + }, + "digests": { + "description": "Cryptographic hashes of the DNSKEY resource record associated with this DnsKey. These digests are needed to construct a DS record that points at this DNS key. Output only.", + "items": { + "$ref": "DnsKeyDigest" + }, + "type": "array" + }, + "id": { + "description": "Unique identifier for the resource; defined by the server (output only).", + "type": "string" + }, + "isActive": { + "description": "Active keys are used to sign subsequent changes to the ManagedZone. Inactive keys are still present as DNSKEY Resource Records for the use of resolvers validating existing signatures.", + "type": "boolean" + }, + "keyLength": { + "description": "Length of the key in bits. 
Specified at creation time, and then immutable.", + "format": "uint32", + "type": "integer" }, "keyTag": { "description": "The key tag is a non-cryptographic hash of the a DNSKEY resource record associated with this DnsKey. The key tag can be used to identify a DNSKEY more quickly (but it is not a unique identifier). In particular, the key tag is used in a parent zone's DS record to point at the DNSKEY in this child ManagedZone. The key tag is a number in the range [0, 65535] and the algorithm to calculate it is specified in RFC4034 Appendix B. Output only.", @@ -1454,35 +2043,226 @@ ], "type": "string" }, - "kind": { - "default": "dns#dnsKeySpec", + "kind": { + "default": "dns#dnsKeySpec", + "type": "string" + } + }, + "type": "object" + }, + "DnsKeysListResponse": { + "description": "The response to a request to enumerate DnsKeys in a ManagedZone.", + "id": "DnsKeysListResponse", + "properties": { + "dnsKeys": { + "description": "The requested resources.", + "items": { + "$ref": "DnsKey" + }, + "type": "array" + }, + "header": { + "$ref": "ResponseHeader" + }, + "kind": { + "default": "dns#dnsKeysListResponse", + "description": "Type of resource.", + "type": "string" + }, + "nextPageToken": { + "description": "The presence of this field indicates that there exist more results following your last page of results in pagination order. To fetch them, make another list request using this value as your pagination token. In this way you can retrieve the complete contents of even very large collections one page at a time. However, if the contents of the collection change between the first and last paginated list request, the set of all elements returned are an inconsistent view of the collection. There is no way to retrieve a \"snapshot\" of collections larger than the maximum page size.", + "type": "string" + } + }, + "type": "object" + }, + "Expr": { + "description": "Represents a textual expression in the Common Expression Language (CEL) syntax. CEL is a C-like expression language. The syntax and semantics of CEL are documented at https://github.com/google/cel-spec. Example (Comparison): title: \"Summary size limit\" description: \"Determines if a summary is less than 100 chars\" expression: \"document.summary.size() \u003c 100\" Example (Equality): title: \"Requestor is owner\" description: \"Determines if requestor is the document owner\" expression: \"document.owner == request.auth.claims.email\" Example (Logic): title: \"Public documents\" description: \"Determine whether the document should be publicly visible\" expression: \"document.type != 'private' \u0026\u0026 document.type != 'internal'\" Example (Data Manipulation): title: \"Notification string\" description: \"Create a notification string with a timestamp.\" expression: \"'New message received at ' + string(document.create_time)\" The exact variables and functions that may be referenced within an expression are determined by the service that evaluates it. See the service documentation for additional information.", + "id": "Expr", + "properties": { + "description": { + "description": "Optional. Description of the expression. This is a longer text which describes the expression, e.g. when hovered over it in a UI.", + "type": "string" + }, + "expression": { + "description": "Textual representation of an expression in Common Expression Language syntax.", + "type": "string" + }, + "location": { + "description": "Optional. String indicating the location of the expression for error reporting, e.g. 
a file name and a position in the file.", + "type": "string" + }, + "title": { + "description": "Optional. Title for the expression, i.e. a short string describing its purpose. This can be used e.g. in UIs which allow to enter the expression.", + "type": "string" + } + }, + "type": "object" + }, + "GoogleIamV1AuditConfig": { + "description": "Specifies the audit configuration for a service. The configuration determines which permission types are logged, and what identities, if any, are exempted from logging. An AuditConfig must have one or more AuditLogConfigs. If there are AuditConfigs for both `allServices` and a specific service, the union of the two AuditConfigs is used for that service: the log_types specified in each AuditConfig are enabled, and the exempted_members in each AuditLogConfig are exempted. Example Policy with multiple AuditConfigs: { \"audit_configs\": [ { \"service\": \"allServices\", \"audit_log_configs\": [ { \"log_type\": \"DATA_READ\", \"exempted_members\": [ \"user:jose@example.com\" ] }, { \"log_type\": \"DATA_WRITE\" }, { \"log_type\": \"ADMIN_READ\" } ] }, { \"service\": \"sampleservice.googleapis.com\", \"audit_log_configs\": [ { \"log_type\": \"DATA_READ\" }, { \"log_type\": \"DATA_WRITE\", \"exempted_members\": [ \"user:aliya@example.com\" ] } ] } ] } For sampleservice, this policy enables DATA_READ, DATA_WRITE and ADMIN_READ logging. It also exempts `jose@example.com` from DATA_READ logging, and `aliya@example.com` from DATA_WRITE logging.", + "id": "GoogleIamV1AuditConfig", + "properties": { + "auditLogConfigs": { + "description": "The configuration for logging of each type of permission.", + "items": { + "$ref": "GoogleIamV1AuditLogConfig" + }, + "type": "array" + }, + "service": { + "description": "Specifies a service that will be enabled for audit logging. For example, `storage.googleapis.com`, `cloudsql.googleapis.com`. `allServices` is a special value that covers all services.", + "type": "string" + } + }, + "type": "object" + }, + "GoogleIamV1AuditLogConfig": { + "description": "Provides the configuration for logging a type of permissions. Example: { \"audit_log_configs\": [ { \"log_type\": \"DATA_READ\", \"exempted_members\": [ \"user:jose@example.com\" ] }, { \"log_type\": \"DATA_WRITE\" } ] } This enables 'DATA_READ' and 'DATA_WRITE' logging, while exempting jose@example.com from DATA_READ logging.", + "id": "GoogleIamV1AuditLogConfig", + "properties": { + "exemptedMembers": { + "description": "Specifies the identities that do not cause logging for this type of permission. Follows the same format of Binding.members.", + "items": { + "type": "string" + }, + "type": "array" + }, + "logType": { + "description": "The log type that this config enables.", + "enum": [ + "LOG_TYPE_UNSPECIFIED", + "ADMIN_READ", + "DATA_WRITE", + "DATA_READ" + ], + "enumDescriptions": [ + "Default case. Should never be this.", + "Admin reads. Example: CloudIAM getIamPolicy", + "Data writes. Example: CloudSQL Users create", + "Data reads. Example: CloudSQL Users list" + ], + "type": "string" + } + }, + "type": "object" + }, + "GoogleIamV1Binding": { + "description": "Associates `members`, or principals, with a `role`.", + "id": "GoogleIamV1Binding", + "properties": { + "condition": { + "$ref": "Expr", + "description": "The condition that is associated with this binding. If the condition evaluates to `true`, then this binding applies to the current request. If the condition evaluates to `false`, then this binding does not apply to the current request. 
However, a different role binding might grant the same role to one or more of the principals in this binding. To learn which resources support conditions in their IAM policies, see the [IAM documentation](https://cloud.google.com/iam/help/conditions/resource-policies)." + }, + "members": { + "description": "Specifies the principals requesting access for a Google Cloud resource. `members` can have the following values: * `allUsers`: A special identifier that represents anyone who is on the internet; with or without a Google account. * `allAuthenticatedUsers`: A special identifier that represents anyone who is authenticated with a Google account or a service account. * `user:{emailid}`: An email address that represents a specific Google account. For example, `alice@example.com` . * `serviceAccount:{emailid}`: An email address that represents a service account. For example, `my-other-app@appspot.gserviceaccount.com`. * `group:{emailid}`: An email address that represents a Google group. For example, `admins@example.com`. * `deleted:user:{emailid}?uid={uniqueid}`: An email address (plus unique identifier) representing a user that has been recently deleted. For example, `alice@example.com?uid=123456789012345678901`. If the user is recovered, this value reverts to `user:{emailid}` and the recovered user retains the role in the binding. * `deleted:serviceAccount:{emailid}?uid={uniqueid}`: An email address (plus unique identifier) representing a service account that has been recently deleted. For example, `my-other-app@appspot.gserviceaccount.com?uid=123456789012345678901`. If the service account is undeleted, this value reverts to `serviceAccount:{emailid}` and the undeleted service account retains the role in the binding. * `deleted:group:{emailid}?uid={uniqueid}`: An email address (plus unique identifier) representing a Google group that has been recently deleted. For example, `admins@example.com?uid=123456789012345678901`. If the group is recovered, this value reverts to `group:{emailid}` and the recovered group retains the role in the binding. * `domain:{domain}`: The G Suite domain (primary) that represents all the users of that domain. For example, `google.com` or `example.com`. ", + "items": { + "type": "string" + }, + "type": "array" + }, + "role": { + "description": "Role that is assigned to the list of `members`, or principals. For example, `roles/viewer`, `roles/editor`, or `roles/owner`.", + "type": "string" + } + }, + "type": "object" + }, + "GoogleIamV1GetIamPolicyRequest": { + "description": "Request message for `GetIamPolicy` method.", + "id": "GoogleIamV1GetIamPolicyRequest", + "properties": { + "options": { + "$ref": "GoogleIamV1GetPolicyOptions", + "description": "OPTIONAL: A `GetPolicyOptions` object for specifying options to `GetIamPolicy`." + } + }, + "type": "object" + }, + "GoogleIamV1GetPolicyOptions": { + "description": "Encapsulates settings provided to GetIamPolicy.", + "id": "GoogleIamV1GetPolicyOptions", + "properties": { + "requestedPolicyVersion": { + "description": "Optional. The maximum policy version that will be used to format the policy. Valid values are 0, 1, and 3. Requests specifying an invalid value will be rejected. Requests for policies with any conditional role bindings must specify version 3. Policies with no conditional role bindings may specify any valid value or leave the field unset. The policy in the response might use the policy version that you specified, or it might use a lower policy version. 
For example, if you specify version 3, but the policy has no conditional role bindings, the response uses version 1. To learn which resources support conditions in their IAM policies, see the [IAM documentation](https://cloud.google.com/iam/help/conditions/resource-policies).", + "format": "int32", + "type": "integer" + } + }, + "type": "object" + }, + "GoogleIamV1Policy": { + "description": "An Identity and Access Management (IAM) policy, which specifies access controls for Google Cloud resources. A `Policy` is a collection of `bindings`. A `binding` binds one or more `members`, or principals, to a single `role`. Principals can be user accounts, service accounts, Google groups, and domains (such as G Suite). A `role` is a named list of permissions; each `role` can be an IAM predefined role or a user-created custom role. For some types of Google Cloud resources, a `binding` can also specify a `condition`, which is a logical expression that allows access to a resource only if the expression evaluates to `true`. A condition can add constraints based on attributes of the request, the resource, or both. To learn which resources support conditions in their IAM policies, see the [IAM documentation](https://cloud.google.com/iam/help/conditions/resource-policies). **JSON example:** { \"bindings\": [ { \"role\": \"roles/resourcemanager.organizationAdmin\", \"members\": [ \"user:mike@example.com\", \"group:admins@example.com\", \"domain:google.com\", \"serviceAccount:my-project-id@appspot.gserviceaccount.com\" ] }, { \"role\": \"roles/resourcemanager.organizationViewer\", \"members\": [ \"user:eve@example.com\" ], \"condition\": { \"title\": \"expirable access\", \"description\": \"Does not grant access after Sep 2020\", \"expression\": \"request.time \u003c timestamp('2020-10-01T00:00:00.000Z')\", } } ], \"etag\": \"BwWWja0YfJA=\", \"version\": 3 } **YAML example:** bindings: - members: - user:mike@example.com - group:admins@example.com - domain:google.com - serviceAccount:my-project-id@appspot.gserviceaccount.com role: roles/resourcemanager.organizationAdmin - members: - user:eve@example.com role: roles/resourcemanager.organizationViewer condition: title: expirable access description: Does not grant access after Sep 2020 expression: request.time \u003c timestamp('2020-10-01T00:00:00.000Z') etag: BwWWja0YfJA= version: 3 For a description of IAM and its features, see the [IAM documentation](https://cloud.google.com/iam/docs/).", + "id": "GoogleIamV1Policy", + "properties": { + "auditConfigs": { + "description": "Specifies cloud audit logging configuration for this policy.", + "items": { + "$ref": "GoogleIamV1AuditConfig" + }, + "type": "array" + }, + "bindings": { + "description": "Associates a list of `members`, or principals, with a `role`. Optionally, may specify a `condition` that determines how and when the `bindings` are applied. Each of the `bindings` must contain at least one principal. The `bindings` in a `Policy` can refer to up to 1,500 principals; up to 250 of these principals can be Google groups. Each occurrence of a principal counts towards these limits. For example, if the `bindings` grant 50 different roles to `user:alice@example.com`, and not to any other principal, then you can add another 1,450 principals to the `bindings` in the `Policy`.", + "items": { + "$ref": "GoogleIamV1Binding" + }, + "type": "array" + }, + "etag": { + "description": "`etag` is used for optimistic concurrency control as a way to help prevent simultaneous updates of a policy from overwriting each other. 
It is strongly suggested that systems make use of the `etag` in the read-modify-write cycle to perform policy updates in order to avoid race conditions: An `etag` is returned in the response to `getIamPolicy`, and systems are expected to put that etag in the request to `setIamPolicy` to ensure that their change will be applied to the same version of the policy. **Important:** If you use IAM Conditions, you must include the `etag` field whenever you call `setIamPolicy`. If you omit this field, then IAM allows you to overwrite a version `3` policy with a version `1` policy, and all of the conditions in the version `3` policy are lost.", + "format": "byte", + "type": "string" + }, + "version": { + "description": "Specifies the format of the policy. Valid values are `0`, `1`, and `3`. Requests that specify an invalid value are rejected. Any operation that affects conditional role bindings must specify version `3`. This requirement applies to the following operations: * Getting a policy that includes a conditional role binding * Adding a conditional role binding to a policy * Changing a conditional role binding in a policy * Removing any role binding, with or without a condition, from a policy that includes conditions **Important:** If you use IAM Conditions, you must include the `etag` field whenever you call `setIamPolicy`. If you omit this field, then IAM allows you to overwrite a version `3` policy with a version `1` policy, and all of the conditions in the version `3` policy are lost. If a policy does not include any conditions, operations on that policy may specify any valid version or leave the field unset. To learn which resources support conditions in their IAM policies, see the [IAM documentation](https://cloud.google.com/iam/help/conditions/resource-policies).", + "format": "int32", + "type": "integer" + } + }, + "type": "object" + }, + "GoogleIamV1SetIamPolicyRequest": { + "description": "Request message for `SetIamPolicy` method.", + "id": "GoogleIamV1SetIamPolicyRequest", + "properties": { + "policy": { + "$ref": "GoogleIamV1Policy", + "description": "REQUIRED: The complete policy to be applied to the `resource`. The size of the policy is limited to a few 10s of KB. An empty policy is a valid policy but certain Google Cloud services (such as Projects) might reject them." + }, + "updateMask": { + "description": "OPTIONAL: A FieldMask specifying which fields of the policy to modify. Only the fields in the mask will be modified. If no mask is provided, the following default mask is used: `paths: \"bindings, etag\"`", + "format": "google-fieldmask", "type": "string" } }, "type": "object" }, - "DnsKeysListResponse": { - "description": "The response to a request to enumerate DnsKeys in a ManagedZone.", - "id": "DnsKeysListResponse", + "GoogleIamV1TestIamPermissionsRequest": { + "description": "Request message for `TestIamPermissions` method.", + "id": "GoogleIamV1TestIamPermissionsRequest", "properties": { - "dnsKeys": { - "description": "The requested resources.", + "permissions": { + "description": "The set of permissions to check for the `resource`. Permissions with wildcards (such as `*` or `storage.*`) are not allowed. 
For more information see [IAM Overview](https://cloud.google.com/iam/docs/overview#permissions).", "items": { - "$ref": "DnsKey" + "type": "string" + }, + "type": "array" + } + }, + "type": "object" + }, + "GoogleIamV1TestIamPermissionsResponse": { + "description": "Response message for `TestIamPermissions` method.", + "id": "GoogleIamV1TestIamPermissionsResponse", + "properties": { + "permissions": { + "description": "A subset of `TestPermissionsRequest.permissions` that the caller is allowed.", + "items": { + "type": "string" }, "type": "array" - }, - "header": { - "$ref": "ResponseHeader" - }, - "kind": { - "default": "dns#dnsKeysListResponse", - "description": "Type of resource.", - "type": "string" - }, - "nextPageToken": { - "description": "The presence of this field indicates that there exist more results following your last page of results in pagination order. To fetch them, make another list request using this value as your pagination token. In this way you can retrieve the complete contents of even very large collections one page at a time. However, if the contents of the collection change between the first and last paginated list request, the set of all elements returned are an inconsistent view of the collection. There is no way to retrieve a \"snapshot\" of collections larger than the maximum page size.", - "type": "string" } }, "type": "object" @@ -2090,6 +2870,26 @@ "format": "int32", "type": "integer" }, + "gkeClustersPerManagedZone": { + "description": "Maximum allowed number of GKE clusters to which a privately scoped zone can be attached.", + "format": "int32", + "type": "integer" + }, + "gkeClustersPerPolicy": { + "description": "Maximum allowed number of GKE clusters per policy.", + "format": "int32", + "type": "integer" + }, + "gkeClustersPerResponsePolicy": { + "description": "Maximum allowed number of GKE clusters per response policy.", + "format": "int32", + "type": "integer" + }, + "itemsPerRoutingPolicy": { + "description": "Maximum allowed number of items per routing policy.", + "format": "int32", + "type": "integer" + }, "kind": { "default": "dns#quota", "type": "string" @@ -2099,6 +2899,11 @@ "format": "int32", "type": "integer" }, + "managedZonesPerGkeCluster": { + "description": "Maximum allowed number of managed zones which can be attached to a GKE cluster.", + "format": "int32", + "type": "integer" + }, "managedZonesPerNetwork": { "description": "Maximum allowed number of managed zones which can be attached to a network.", "format": "int32", @@ -2114,6 +2919,16 @@ "format": "int32", "type": "integer" }, + "networksPerResponsePolicy": { + "description": "Maximum allowed number of networks per response policy.", + "format": "int32", + "type": "integer" + }, + "peeringZonesPerTargetNetwork": { + "description": "Maximum allowed number of consumer peering zones per target network owned by this producer project", + "format": "int32", + "type": "integer" + }, "policies": { "description": "Maximum allowed number of policies per project.", "format": "int32", @@ -2124,6 +2939,16 @@ "format": "int32", "type": "integer" }, + "responsePolicies": { + "description": "Maximum allowed number of response policies per project.", + "format": "int32", + "type": "integer" + }, + "responsePolicyRulesPerResponsePolicy": { + "description": "Maximum allowed number of rules per response policy.", + "format": "int32", + "type": "integer" + }, "rrsetAdditionsPerChange": { "description": "Maximum allowed number of ResourceRecordSets to add per ChangesCreateRequest.", "format": "int32", 
@@ -2164,6 +2989,115 @@ }, "type": "object" }, + "RRSetRoutingPolicy": { + "description": "A RRSetRoutingPolicy represents ResourceRecordSet data that is returned dynamically with the response varying based on configured properties such as geolocation or by weighted random selection.", + "id": "RRSetRoutingPolicy", + "properties": { + "geo": { + "$ref": "RRSetRoutingPolicyGeoPolicy" + }, + "kind": { + "default": "dns#rRSetRoutingPolicy", + "type": "string" + }, + "wrr": { + "$ref": "RRSetRoutingPolicyWrrPolicy" + } + }, + "type": "object" + }, + "RRSetRoutingPolicyGeoPolicy": { + "description": "Configures a RRSetRoutingPolicy that routes based on the geo location of the querying user.", + "id": "RRSetRoutingPolicyGeoPolicy", + "properties": { + "items": { + "description": "The primary geo routing configuration. If there are multiple items with the same location, an error is returned instead.", + "items": { + "$ref": "RRSetRoutingPolicyGeoPolicyGeoPolicyItem" + }, + "type": "array" + }, + "kind": { + "default": "dns#rRSetRoutingPolicyGeoPolicy", + "type": "string" + } + }, + "type": "object" + }, + "RRSetRoutingPolicyGeoPolicyGeoPolicyItem": { + "description": "ResourceRecordSet data for one geo location.", + "id": "RRSetRoutingPolicyGeoPolicyGeoPolicyItem", + "properties": { + "kind": { + "default": "dns#rRSetRoutingPolicyGeoPolicyGeoPolicyItem", + "type": "string" + }, + "location": { + "description": "The geo-location granularity is a GCP region. This location string should correspond to a GCP region. e.g. \"us-east1\", \"southamerica-east1\", \"asia-east1\", etc.", + "type": "string" + }, + "rrdatas": { + "items": { + "type": "string" + }, + "type": "array" + }, + "signatureRrdatas": { + "description": "DNSSEC generated signatures for all the rrdata within this item. Note that if health checked targets are provided for DNSSEC enabled zones, there's a restriction of 1 ip per item. .", + "items": { + "type": "string" + }, + "type": "array" + } + }, + "type": "object" + }, + "RRSetRoutingPolicyWrrPolicy": { + "description": "Configures a RRSetRoutingPolicy that routes in a weighted round robin fashion.", + "id": "RRSetRoutingPolicyWrrPolicy", + "properties": { + "items": { + "items": { + "$ref": "RRSetRoutingPolicyWrrPolicyWrrPolicyItem" + }, + "type": "array" + }, + "kind": { + "default": "dns#rRSetRoutingPolicyWrrPolicy", + "type": "string" + } + }, + "type": "object" + }, + "RRSetRoutingPolicyWrrPolicyWrrPolicyItem": { + "description": "A routing block which contains the routing information for one WRR item.", + "id": "RRSetRoutingPolicyWrrPolicyWrrPolicyItem", + "properties": { + "kind": { + "default": "dns#rRSetRoutingPolicyWrrPolicyWrrPolicyItem", + "type": "string" + }, + "rrdatas": { + "items": { + "type": "string" + }, + "type": "array" + }, + "signatureRrdatas": { + "description": "DNSSEC generated signatures for all the rrdata within this item. Note that if health checked targets are provided for DNSSEC enabled zones, there's a restriction of 1 ip per item. .", + "items": { + "type": "string" + }, + "type": "array" + }, + "weight": { + "description": "The weight corresponding to this subset of rrdata. When multiple WeightedRoundRobinPolicyItems are configured, the probability of returning an rrset is proportional to its weight relative to the sum of weights configured for all items. 
This weight should be non-negative.", + "format": "double", + "type": "number" + } + }, + "type": "object" + }, "ResourceRecordSet": { "description": "A unit of data that is returned by the DNS servers.", "id": "ResourceRecordSet", @@ -2176,6 +3110,10 @@ "description": "For example, www.example.com.", "type": "string" }, + "routingPolicy": { + "$ref": "RRSetRoutingPolicy", + "description": "Configures dynamic query responses based on geo location of querying user or a weighted round robin based routing policy. A ResourceRecordSet should only have either rrdata (static) or routing_policy (dynamic). An error is returned otherwise." + }, "rrdatas": { "description": "As defined in RFC 1035 (section 5) and RFC 1034 (section 3.6.1) -- see examples.", "items": { @@ -2242,6 +3180,187 @@ } }, "type": "object" + }, + "ResponsePoliciesListResponse": { + "id": "ResponsePoliciesListResponse", + "properties": { + "header": { + "$ref": "ResponseHeader" + }, + "nextPageToken": { + "description": "The presence of this field indicates that there exist more results following your last page of results in pagination order. To fetch them, make another list request using this value as your page token. This lets you the complete contents of even very large collections one page at a time. However, if the contents of the collection change between the first and last paginated list request, the set of all elements returned are an inconsistent view of the collection. You cannot retrieve a consistent snapshot of a collection larger than the maximum page size.", + "type": "string" + }, + "responsePolicies": { + "description": "The Response Policy resources.", + "items": { + "$ref": "ResponsePolicy" + }, + "type": "array" + } + }, + "type": "object" + }, + "ResponsePoliciesPatchResponse": { + "id": "ResponsePoliciesPatchResponse", + "properties": { + "header": { + "$ref": "ResponseHeader" + }, + "responsePolicy": { + "$ref": "ResponsePolicy" + } + }, + "type": "object" + }, + "ResponsePoliciesUpdateResponse": { + "id": "ResponsePoliciesUpdateResponse", + "properties": { + "header": { + "$ref": "ResponseHeader" + }, + "responsePolicy": { + "$ref": "ResponsePolicy" + } + }, + "type": "object" + }, + "ResponsePolicy": { + "description": "A Response Policy is a collection of selectors that apply to queries made against one or more Virtual Private Cloud networks.", + "id": "ResponsePolicy", + "properties": { + "description": { + "description": "User-provided description for this Response Policy.", + "type": "string" + }, + "id": { + "description": "Unique identifier for the resource; defined by the server (output only).", + "format": "int64", + "type": "string" + }, + "kind": { + "default": "dns#responsePolicy", + "type": "string" + }, + "networks": { + "description": "List of network names specifying networks to which this policy is applied.", + "items": { + "$ref": "ResponsePolicyNetwork" + }, + "type": "array" + }, + "responsePolicyName": { + "description": "User assigned name for this Response Policy.", + "type": "string" + } + }, + "type": "object" + }, + "ResponsePolicyNetwork": { + "id": "ResponsePolicyNetwork", + "properties": { + "kind": { + "default": "dns#responsePolicyNetwork", + "type": "string" + }, + "networkUrl": { + "description": "The fully qualified URL of the VPC network to bind to. 
This should be formatted like https://www.googleapis.com/compute/v1/projects/{project}/global/networks/{network}", + "type": "string" + } + }, + "type": "object" + }, + "ResponsePolicyRule": { + "description": "A Response Policy Rule is a selector that applies its behavior to queries that match the selector. Selectors are DNS names, which may be wildcards or exact matches. Each DNS query subject to a Response Policy matches at most one ResponsePolicyRule, as identified by the dns_name field with the longest matching suffix.", + "id": "ResponsePolicyRule", + "properties": { + "behavior": { + "description": "Answer this query with a behavior rather than DNS data.", + "enum": [ + "behaviorUnspecified", + "bypassResponsePolicy" + ], + "enumDescriptions": [ + "", + "Skip a less-specific ResponsePolicyRule and continue normal query logic. This can be used with a less-specific wildcard selector to exempt a subset of the wildcard ResponsePolicyRule from the ResponsePolicy behavior and query the public Internet instead. For instance, if these rules exist: *.example.com -\u003e LocalData 1.2.3.4 foo.example.com -\u003e Behavior 'bypassResponsePolicy' Then a query for 'foo.example.com' skips the wildcard. This additionally functions to facilitate the allowlist feature. RPZs can be applied to multiple levels in the (eventually org, folder, project, network) hierarchy. If a rule is applied at a higher level of the hierarchy, adding a passthru rule at a lower level will supersede that, and a query from an affected vm to that domain will be exempt from the RPZ and proceed to normal resolution behavior." + ], + "type": "string" + }, + "dnsName": { + "description": "The DNS name (wildcard or exact) to apply this rule to. Must be unique within the Response Policy Rule.", + "type": "string" + }, + "kind": { + "default": "dns#responsePolicyRule", + "type": "string" + }, + "localData": { + "$ref": "ResponsePolicyRuleLocalData", + "description": "Answer this query directly with DNS data. These ResourceRecordSets override any other DNS behavior for the matched name; in particular they override private zones, the public internet, and GCP internal DNS. No SOA nor NS types are allowed." + }, + "ruleName": { + "description": "An identifier for this rule. Must be unique with the ResponsePolicy.", + "type": "string" + } + }, + "type": "object" + }, + "ResponsePolicyRuleLocalData": { + "id": "ResponsePolicyRuleLocalData", + "properties": { + "localDatas": { + "description": "All resource record sets for this selector, one per resource record type. The name must match the dns_name.", + "items": { + "$ref": "ResourceRecordSet" + }, + "type": "array" + } + }, + "type": "object" + }, + "ResponsePolicyRulesListResponse": { + "id": "ResponsePolicyRulesListResponse", + "properties": { + "header": { + "$ref": "ResponseHeader" + }, + "nextPageToken": { + "description": "The presence of this field indicates that there exist more results following your last page of results in pagination order. To fetch them, make another list request using this value as your page token. This lets you the complete contents of even very large collections one page at a time. However, if the contents of the collection change between the first and last paginated list request, the set of all elements returned are an inconsistent view of the collection. 
You cannot retrieve a consistent snapshot of a collection larger than the maximum page size.", + "type": "string" + }, + "responsePolicyRules": { + "description": "The Response Policy Rule resources.", + "items": { + "$ref": "ResponsePolicyRule" + }, + "type": "array" + } + }, + "type": "object" + }, + "ResponsePolicyRulesPatchResponse": { + "id": "ResponsePolicyRulesPatchResponse", + "properties": { + "header": { + "$ref": "ResponseHeader" + }, + "responsePolicyRule": { + "$ref": "ResponsePolicyRule" + } + }, + "type": "object" + }, + "ResponsePolicyRulesUpdateResponse": { + "id": "ResponsePolicyRulesUpdateResponse", + "properties": { + "header": { + "$ref": "ResponseHeader" + }, + "responsePolicyRule": { + "$ref": "ResponsePolicyRule" + } + }, + "type": "object" } }, "servicePath": "", diff --git a/vendor/google.golang.org/api/dns/v1/dns-gen.go b/vendor/google.golang.org/api/dns/v1/dns-gen.go index 0ad38c0dd..600b4f8ef 100644 --- a/vendor/google.golang.org/api/dns/v1/dns-gen.go +++ b/vendor/google.golang.org/api/dns/v1/dns-gen.go @@ -1,4 +1,4 @@ -// Copyright 2021 Google LLC. +// Copyright 2022 Google LLC. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. @@ -54,6 +54,7 @@ import ( "strings" googleapi "google.golang.org/api/googleapi" + internal "google.golang.org/api/internal" gensupport "google.golang.org/api/internal/gensupport" option "google.golang.org/api/option" internaloption "google.golang.org/api/option/internaloption" @@ -100,7 +101,7 @@ const ( // NewService creates a new Service. func NewService(ctx context.Context, opts ...option.ClientOption) (*Service, error) { - scopesOption := option.WithScopes( + scopesOption := internaloption.WithDefaultScopes( "https://www.googleapis.com/auth/cloud-platform", "https://www.googleapis.com/auth/cloud-platform.read-only", "https://www.googleapis.com/auth/ndev.clouddns.readonly", @@ -141,6 +142,8 @@ func New(client *http.Client) (*Service, error) { s.Policies = NewPoliciesService(s) s.Projects = NewProjectsService(s) s.ResourceRecordSets = NewResourceRecordSetsService(s) + s.ResponsePolicies = NewResponsePoliciesService(s) + s.ResponsePolicyRules = NewResponsePolicyRulesService(s) return s, nil } @@ -162,6 +165,10 @@ type Service struct { Projects *ProjectsService ResourceRecordSets *ResourceRecordSetsService + + ResponsePolicies *ResponsePoliciesService + + ResponsePolicyRules *ResponsePolicyRulesService } func (s *Service) userAgent() string { @@ -234,6 +241,24 @@ type ResourceRecordSetsService struct { s *Service } +func NewResponsePoliciesService(s *Service) *ResponsePoliciesService { + rs := &ResponsePoliciesService{s: s} + return rs +} + +type ResponsePoliciesService struct { + s *Service +} + +func NewResponsePolicyRulesService(s *Service) *ResponsePolicyRulesService { + rs := &ResponsePolicyRulesService{s: s} + return rs +} + +type ResponsePolicyRulesService struct { + s *Service +} + // Change: A Change represents a set of ResourceRecordSet additions and // deletions applied atomically to a ManagedZone. ResourceRecordSets // within a ManagedZone are modified by creating a new Change element in @@ -580,6 +605,523 @@ func (s *DnsKeysListResponse) MarshalJSON() ([]byte, error) { return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } +// Expr: Represents a textual expression in the Common Expression +// Language (CEL) syntax. CEL is a C-like expression language. The +// syntax and semantics of CEL are documented at +// https://github.com/google/cel-spec. 
Example (Comparison): title: +// "Summary size limit" description: "Determines if a summary is less +// than 100 chars" expression: "document.summary.size() < 100" Example +// (Equality): title: "Requestor is owner" description: "Determines if +// requestor is the document owner" expression: "document.owner == +// request.auth.claims.email" Example (Logic): title: "Public documents" +// description: "Determine whether the document should be publicly +// visible" expression: "document.type != 'private' && document.type != +// 'internal'" Example (Data Manipulation): title: "Notification string" +// description: "Create a notification string with a timestamp." +// expression: "'New message received at ' + +// string(document.create_time)" The exact variables and functions that +// may be referenced within an expression are determined by the service +// that evaluates it. See the service documentation for additional +// information. +type Expr struct { + // Description: Optional. Description of the expression. This is a + // longer text which describes the expression, e.g. when hovered over it + // in a UI. + Description string `json:"description,omitempty"` + + // Expression: Textual representation of an expression in Common + // Expression Language syntax. + Expression string `json:"expression,omitempty"` + + // Location: Optional. String indicating the location of the expression + // for error reporting, e.g. a file name and a position in the file. + Location string `json:"location,omitempty"` + + // Title: Optional. Title for the expression, i.e. a short string + // describing its purpose. This can be used e.g. in UIs which allow to + // enter the expression. + Title string `json:"title,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Description") to + // unconditionally include in API requests. By default, fields with + // empty or default values are omitted from API requests. However, any + // non-pointer, non-interface field appearing in ForceSendFields will be + // sent to the server regardless of whether the field is empty or not. + // This may be used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Description") to include + // in API requests with the JSON null value. By default, fields with + // empty values are omitted from API requests. However, any field with + // an empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *Expr) MarshalJSON() ([]byte, error) { + type NoMethod Expr + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// GoogleIamV1AuditConfig: Specifies the audit configuration for a +// service. The configuration determines which permission types are +// logged, and what identities, if any, are exempted from logging. An +// AuditConfig must have one or more AuditLogConfigs. If there are +// AuditConfigs for both `allServices` and a specific service, the union +// of the two AuditConfigs is used for that service: the log_types +// specified in each AuditConfig are enabled, and the exempted_members +// in each AuditLogConfig are exempted. 
Example Policy with multiple +// AuditConfigs: { "audit_configs": [ { "service": "allServices", +// "audit_log_configs": [ { "log_type": "DATA_READ", "exempted_members": +// [ "user:jose@example.com" ] }, { "log_type": "DATA_WRITE" }, { +// "log_type": "ADMIN_READ" } ] }, { "service": +// "sampleservice.googleapis.com", "audit_log_configs": [ { "log_type": +// "DATA_READ" }, { "log_type": "DATA_WRITE", "exempted_members": [ +// "user:aliya@example.com" ] } ] } ] } For sampleservice, this policy +// enables DATA_READ, DATA_WRITE and ADMIN_READ logging. It also exempts +// `jose@example.com` from DATA_READ logging, and `aliya@example.com` +// from DATA_WRITE logging. +type GoogleIamV1AuditConfig struct { + // AuditLogConfigs: The configuration for logging of each type of + // permission. + AuditLogConfigs []*GoogleIamV1AuditLogConfig `json:"auditLogConfigs,omitempty"` + + // Service: Specifies a service that will be enabled for audit logging. + // For example, `storage.googleapis.com`, `cloudsql.googleapis.com`. + // `allServices` is a special value that covers all services. + Service string `json:"service,omitempty"` + + // ForceSendFields is a list of field names (e.g. "AuditLogConfigs") to + // unconditionally include in API requests. By default, fields with + // empty or default values are omitted from API requests. However, any + // non-pointer, non-interface field appearing in ForceSendFields will be + // sent to the server regardless of whether the field is empty or not. + // This may be used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "AuditLogConfigs") to + // include in API requests with the JSON null value. By default, fields + // with empty values are omitted from API requests. However, any field + // with an empty value appearing in NullFields will be sent to the + // server as null. It is an error if a field in this list has a + // non-empty value. This may be used to include null fields in Patch + // requests. + NullFields []string `json:"-"` +} + +func (s *GoogleIamV1AuditConfig) MarshalJSON() ([]byte, error) { + type NoMethod GoogleIamV1AuditConfig + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// GoogleIamV1AuditLogConfig: Provides the configuration for logging a +// type of permissions. Example: { "audit_log_configs": [ { "log_type": +// "DATA_READ", "exempted_members": [ "user:jose@example.com" ] }, { +// "log_type": "DATA_WRITE" } ] } This enables 'DATA_READ' and +// 'DATA_WRITE' logging, while exempting jose@example.com from DATA_READ +// logging. +type GoogleIamV1AuditLogConfig struct { + // ExemptedMembers: Specifies the identities that do not cause logging + // for this type of permission. Follows the same format of + // Binding.members. + ExemptedMembers []string `json:"exemptedMembers,omitempty"` + + // LogType: The log type that this config enables. + // + // Possible values: + // "LOG_TYPE_UNSPECIFIED" - Default case. Should never be this. + // "ADMIN_READ" - Admin reads. Example: CloudIAM getIamPolicy + // "DATA_WRITE" - Data writes. Example: CloudSQL Users create + // "DATA_READ" - Data reads. Example: CloudSQL Users list + LogType string `json:"logType,omitempty"` + + // ForceSendFields is a list of field names (e.g. "ExemptedMembers") to + // unconditionally include in API requests. By default, fields with + // empty or default values are omitted from API requests. 
However, any + // non-pointer, non-interface field appearing in ForceSendFields will be + // sent to the server regardless of whether the field is empty or not. + // This may be used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "ExemptedMembers") to + // include in API requests with the JSON null value. By default, fields + // with empty values are omitted from API requests. However, any field + // with an empty value appearing in NullFields will be sent to the + // server as null. It is an error if a field in this list has a + // non-empty value. This may be used to include null fields in Patch + // requests. + NullFields []string `json:"-"` +} + +func (s *GoogleIamV1AuditLogConfig) MarshalJSON() ([]byte, error) { + type NoMethod GoogleIamV1AuditLogConfig + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// GoogleIamV1Binding: Associates `members`, or principals, with a +// `role`. +type GoogleIamV1Binding struct { + // Condition: The condition that is associated with this binding. If the + // condition evaluates to `true`, then this binding applies to the + // current request. If the condition evaluates to `false`, then this + // binding does not apply to the current request. However, a different + // role binding might grant the same role to one or more of the + // principals in this binding. To learn which resources support + // conditions in their IAM policies, see the IAM documentation + // (https://cloud.google.com/iam/help/conditions/resource-policies). + Condition *Expr `json:"condition,omitempty"` + + // Members: Specifies the principals requesting access for a Google + // Cloud resource. `members` can have the following values: * + // `allUsers`: A special identifier that represents anyone who is on the + // internet; with or without a Google account. * + // `allAuthenticatedUsers`: A special identifier that represents anyone + // who is authenticated with a Google account or a service account. * + // `user:{emailid}`: An email address that represents a specific Google + // account. For example, `alice@example.com` . * + // `serviceAccount:{emailid}`: An email address that represents a + // service account. For example, + // `my-other-app@appspot.gserviceaccount.com`. * `group:{emailid}`: An + // email address that represents a Google group. For example, + // `admins@example.com`. * `deleted:user:{emailid}?uid={uniqueid}`: An + // email address (plus unique identifier) representing a user that has + // been recently deleted. For example, + // `alice@example.com?uid=123456789012345678901`. If the user is + // recovered, this value reverts to `user:{emailid}` and the recovered + // user retains the role in the binding. * + // `deleted:serviceAccount:{emailid}?uid={uniqueid}`: An email address + // (plus unique identifier) representing a service account that has been + // recently deleted. For example, + // `my-other-app@appspot.gserviceaccount.com?uid=123456789012345678901`. + // If the service account is undeleted, this value reverts to + // `serviceAccount:{emailid}` and the undeleted service account retains + // the role in the binding. * `deleted:group:{emailid}?uid={uniqueid}`: + // An email address (plus unique identifier) representing a Google group + // that has been recently deleted. For example, + // `admins@example.com?uid=123456789012345678901`. 
If the group is + // recovered, this value reverts to `group:{emailid}` and the recovered + // group retains the role in the binding. * `domain:{domain}`: The G + // Suite domain (primary) that represents all the users of that domain. + // For example, `google.com` or `example.com`. + Members []string `json:"members,omitempty"` + + // Role: Role that is assigned to the list of `members`, or principals. + // For example, `roles/viewer`, `roles/editor`, or `roles/owner`. + Role string `json:"role,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Condition") to + // unconditionally include in API requests. By default, fields with + // empty or default values are omitted from API requests. However, any + // non-pointer, non-interface field appearing in ForceSendFields will be + // sent to the server regardless of whether the field is empty or not. + // This may be used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Condition") to include in + // API requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *GoogleIamV1Binding) MarshalJSON() ([]byte, error) { + type NoMethod GoogleIamV1Binding + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// GoogleIamV1GetIamPolicyRequest: Request message for `GetIamPolicy` +// method. +type GoogleIamV1GetIamPolicyRequest struct { + // Options: OPTIONAL: A `GetPolicyOptions` object for specifying options + // to `GetIamPolicy`. + Options *GoogleIamV1GetPolicyOptions `json:"options,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Options") to + // unconditionally include in API requests. By default, fields with + // empty or default values are omitted from API requests. However, any + // non-pointer, non-interface field appearing in ForceSendFields will be + // sent to the server regardless of whether the field is empty or not. + // This may be used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Options") to include in + // API requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *GoogleIamV1GetIamPolicyRequest) MarshalJSON() ([]byte, error) { + type NoMethod GoogleIamV1GetIamPolicyRequest + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// GoogleIamV1GetPolicyOptions: Encapsulates settings provided to +// GetIamPolicy. +type GoogleIamV1GetPolicyOptions struct { + // RequestedPolicyVersion: Optional. The maximum policy version that + // will be used to format the policy. Valid values are 0, 1, and 3. + // Requests specifying an invalid value will be rejected. Requests for + // policies with any conditional role bindings must specify version 3. 
+ // Policies with no conditional role bindings may specify any valid + // value or leave the field unset. The policy in the response might use + // the policy version that you specified, or it might use a lower policy + // version. For example, if you specify version 3, but the policy has no + // conditional role bindings, the response uses version 1. To learn + // which resources support conditions in their IAM policies, see the IAM + // documentation + // (https://cloud.google.com/iam/help/conditions/resource-policies). + RequestedPolicyVersion int64 `json:"requestedPolicyVersion,omitempty"` + + // ForceSendFields is a list of field names (e.g. + // "RequestedPolicyVersion") to unconditionally include in API requests. + // By default, fields with empty or default values are omitted from API + // requests. However, any non-pointer, non-interface field appearing in + // ForceSendFields will be sent to the server regardless of whether the + // field is empty or not. This may be used to include empty fields in + // Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "RequestedPolicyVersion") + // to include in API requests with the JSON null value. By default, + // fields with empty values are omitted from API requests. However, any + // field with an empty value appearing in NullFields will be sent to the + // server as null. It is an error if a field in this list has a + // non-empty value. This may be used to include null fields in Patch + // requests. + NullFields []string `json:"-"` +} + +func (s *GoogleIamV1GetPolicyOptions) MarshalJSON() ([]byte, error) { + type NoMethod GoogleIamV1GetPolicyOptions + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// GoogleIamV1Policy: An Identity and Access Management (IAM) policy, +// which specifies access controls for Google Cloud resources. A +// `Policy` is a collection of `bindings`. A `binding` binds one or more +// `members`, or principals, to a single `role`. Principals can be user +// accounts, service accounts, Google groups, and domains (such as G +// Suite). A `role` is a named list of permissions; each `role` can be +// an IAM predefined role or a user-created custom role. For some types +// of Google Cloud resources, a `binding` can also specify a +// `condition`, which is a logical expression that allows access to a +// resource only if the expression evaluates to `true`. A condition can +// add constraints based on attributes of the request, the resource, or +// both. To learn which resources support conditions in their IAM +// policies, see the IAM documentation +// (https://cloud.google.com/iam/help/conditions/resource-policies). 
+// **JSON example:** { "bindings": [ { "role": +// "roles/resourcemanager.organizationAdmin", "members": [ +// "user:mike@example.com", "group:admins@example.com", +// "domain:google.com", +// "serviceAccount:my-project-id@appspot.gserviceaccount.com" ] }, { +// "role": "roles/resourcemanager.organizationViewer", "members": [ +// "user:eve@example.com" ], "condition": { "title": "expirable access", +// "description": "Does not grant access after Sep 2020", "expression": +// "request.time < timestamp('2020-10-01T00:00:00.000Z')", } } ], +// "etag": "BwWWja0YfJA=", "version": 3 } **YAML example:** bindings: - +// members: - user:mike@example.com - group:admins@example.com - +// domain:google.com - +// serviceAccount:my-project-id@appspot.gserviceaccount.com role: +// roles/resourcemanager.organizationAdmin - members: - +// user:eve@example.com role: roles/resourcemanager.organizationViewer +// condition: title: expirable access description: Does not grant access +// after Sep 2020 expression: request.time < +// timestamp('2020-10-01T00:00:00.000Z') etag: BwWWja0YfJA= version: 3 +// For a description of IAM and its features, see the IAM documentation +// (https://cloud.google.com/iam/docs/). +type GoogleIamV1Policy struct { + // AuditConfigs: Specifies cloud audit logging configuration for this + // policy. + AuditConfigs []*GoogleIamV1AuditConfig `json:"auditConfigs,omitempty"` + + // Bindings: Associates a list of `members`, or principals, with a + // `role`. Optionally, may specify a `condition` that determines how and + // when the `bindings` are applied. Each of the `bindings` must contain + // at least one principal. The `bindings` in a `Policy` can refer to up + // to 1,500 principals; up to 250 of these principals can be Google + // groups. Each occurrence of a principal counts towards these limits. + // For example, if the `bindings` grant 50 different roles to + // `user:alice@example.com`, and not to any other principal, then you + // can add another 1,450 principals to the `bindings` in the `Policy`. + Bindings []*GoogleIamV1Binding `json:"bindings,omitempty"` + + // Etag: `etag` is used for optimistic concurrency control as a way to + // help prevent simultaneous updates of a policy from overwriting each + // other. It is strongly suggested that systems make use of the `etag` + // in the read-modify-write cycle to perform policy updates in order to + // avoid race conditions: An `etag` is returned in the response to + // `getIamPolicy`, and systems are expected to put that etag in the + // request to `setIamPolicy` to ensure that their change will be applied + // to the same version of the policy. **Important:** If you use IAM + // Conditions, you must include the `etag` field whenever you call + // `setIamPolicy`. If you omit this field, then IAM allows you to + // overwrite a version `3` policy with a version `1` policy, and all of + // the conditions in the version `3` policy are lost. + Etag string `json:"etag,omitempty"` + + // Version: Specifies the format of the policy. Valid values are `0`, + // `1`, and `3`. Requests that specify an invalid value are rejected. + // Any operation that affects conditional role bindings must specify + // version `3`. 
This requirement applies to the following operations: * + // Getting a policy that includes a conditional role binding * Adding a + // conditional role binding to a policy * Changing a conditional role + // binding in a policy * Removing any role binding, with or without a + // condition, from a policy that includes conditions **Important:** If + // you use IAM Conditions, you must include the `etag` field whenever + // you call `setIamPolicy`. If you omit this field, then IAM allows you + // to overwrite a version `3` policy with a version `1` policy, and all + // of the conditions in the version `3` policy are lost. If a policy + // does not include any conditions, operations on that policy may + // specify any valid version or leave the field unset. To learn which + // resources support conditions in their IAM policies, see the IAM + // documentation + // (https://cloud.google.com/iam/help/conditions/resource-policies). + Version int64 `json:"version,omitempty"` + + // ServerResponse contains the HTTP response code and headers from the + // server. + googleapi.ServerResponse `json:"-"` + + // ForceSendFields is a list of field names (e.g. "AuditConfigs") to + // unconditionally include in API requests. By default, fields with + // empty or default values are omitted from API requests. However, any + // non-pointer, non-interface field appearing in ForceSendFields will be + // sent to the server regardless of whether the field is empty or not. + // This may be used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "AuditConfigs") to include + // in API requests with the JSON null value. By default, fields with + // empty values are omitted from API requests. However, any field with + // an empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *GoogleIamV1Policy) MarshalJSON() ([]byte, error) { + type NoMethod GoogleIamV1Policy + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// GoogleIamV1SetIamPolicyRequest: Request message for `SetIamPolicy` +// method. +type GoogleIamV1SetIamPolicyRequest struct { + // Policy: REQUIRED: The complete policy to be applied to the + // `resource`. The size of the policy is limited to a few 10s of KB. An + // empty policy is a valid policy but certain Google Cloud services + // (such as Projects) might reject them. + Policy *GoogleIamV1Policy `json:"policy,omitempty"` + + // UpdateMask: OPTIONAL: A FieldMask specifying which fields of the + // policy to modify. Only the fields in the mask will be modified. If no + // mask is provided, the following default mask is used: `paths: + // "bindings, etag" + UpdateMask string `json:"updateMask,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Policy") to + // unconditionally include in API requests. By default, fields with + // empty or default values are omitted from API requests. However, any + // non-pointer, non-interface field appearing in ForceSendFields will be + // sent to the server regardless of whether the field is empty or not. + // This may be used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Policy") to include in API + // requests with the JSON null value. 
By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *GoogleIamV1SetIamPolicyRequest) MarshalJSON() ([]byte, error) { + type NoMethod GoogleIamV1SetIamPolicyRequest + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// GoogleIamV1TestIamPermissionsRequest: Request message for +// `TestIamPermissions` method. +type GoogleIamV1TestIamPermissionsRequest struct { + // Permissions: The set of permissions to check for the `resource`. + // Permissions with wildcards (such as `*` or `storage.*`) are not + // allowed. For more information see IAM Overview + // (https://cloud.google.com/iam/docs/overview#permissions). + Permissions []string `json:"permissions,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Permissions") to + // unconditionally include in API requests. By default, fields with + // empty or default values are omitted from API requests. However, any + // non-pointer, non-interface field appearing in ForceSendFields will be + // sent to the server regardless of whether the field is empty or not. + // This may be used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Permissions") to include + // in API requests with the JSON null value. By default, fields with + // empty values are omitted from API requests. However, any field with + // an empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *GoogleIamV1TestIamPermissionsRequest) MarshalJSON() ([]byte, error) { + type NoMethod GoogleIamV1TestIamPermissionsRequest + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// GoogleIamV1TestIamPermissionsResponse: Response message for +// `TestIamPermissions` method. +type GoogleIamV1TestIamPermissionsResponse struct { + // Permissions: A subset of `TestPermissionsRequest.permissions` that + // the caller is allowed. + Permissions []string `json:"permissions,omitempty"` + + // ServerResponse contains the HTTP response code and headers from the + // server. + googleapi.ServerResponse `json:"-"` + + // ForceSendFields is a list of field names (e.g. "Permissions") to + // unconditionally include in API requests. By default, fields with + // empty or default values are omitted from API requests. However, any + // non-pointer, non-interface field appearing in ForceSendFields will be + // sent to the server regardless of whether the field is empty or not. + // This may be used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Permissions") to include + // in API requests with the JSON null value. By default, fields with + // empty values are omitted from API requests. However, any field with + // an empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. 
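The etag-based read-modify-write cycle described in the `GoogleIamV1Policy` comment above can be hard to picture from the generated types alone. Below is a minimal sketch that uses only the message types defined in this hunk; the role, member and etag values are placeholders, and the RPC calls that actually fetch and apply the policy are deliberately omitted because they are not part of this excerpt.

```go
package main

import (
	"fmt"

	dns "google.golang.org/api/dns/v1"
)

// addViewerBinding illustrates the read-modify-write pattern: mutate a
// previously fetched policy and send it back together with its etag so
// that concurrent updates can be detected by the server.
func addViewerBinding(current *dns.GoogleIamV1Policy, member string) *dns.GoogleIamV1SetIamPolicyRequest {
	current.Bindings = append(current.Bindings, &dns.GoogleIamV1Binding{
		Role:    "roles/dns.reader", // placeholder role
		Members: []string{member},
	})
	return &dns.GoogleIamV1SetIamPolicyRequest{
		Policy: current, // Policy.Etag is carried over unchanged
	}
}

func main() {
	// The policy would normally come from a getIamPolicy call; the etag
	// here is the placeholder value used in the documentation above.
	req := addViewerBinding(&dns.GoogleIamV1Policy{Etag: "BwWWja0YfJA="}, "user:jose@example.com")
	fmt.Println(len(req.Policy.Bindings))
}
```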
+ NullFields []string `json:"-"` +} + +func (s *GoogleIamV1TestIamPermissionsResponse) MarshalJSON() ([]byte, error) { + type NoMethod GoogleIamV1TestIamPermissionsResponse + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + // ManagedZone: A zone is a subtree of the DNS namespace under one // administrative responsibility. A ManagedZone is a resource that // represents a DNS zone hosted by the Cloud DNS service. @@ -1629,11 +2171,31 @@ type Quota struct { // ManagedZone. DnsKeysPerManagedZone int64 `json:"dnsKeysPerManagedZone,omitempty"` + // GkeClustersPerManagedZone: Maximum allowed number of GKE clusters to + // which a privately scoped zone can be attached. + GkeClustersPerManagedZone int64 `json:"gkeClustersPerManagedZone,omitempty"` + + // GkeClustersPerPolicy: Maximum allowed number of GKE clusters per + // policy. + GkeClustersPerPolicy int64 `json:"gkeClustersPerPolicy,omitempty"` + + // GkeClustersPerResponsePolicy: Maximum allowed number of GKE clusters + // per response policy. + GkeClustersPerResponsePolicy int64 `json:"gkeClustersPerResponsePolicy,omitempty"` + + // ItemsPerRoutingPolicy: Maximum allowed number of items per routing + // policy. + ItemsPerRoutingPolicy int64 `json:"itemsPerRoutingPolicy,omitempty"` + Kind string `json:"kind,omitempty"` // ManagedZones: Maximum allowed number of managed zones in the project. ManagedZones int64 `json:"managedZones,omitempty"` + // ManagedZonesPerGkeCluster: Maximum allowed number of managed zones + // which can be attached to a GKE cluster. + ManagedZonesPerGkeCluster int64 `json:"managedZonesPerGkeCluster,omitempty"` + // ManagedZonesPerNetwork: Maximum allowed number of managed zones which // can be attached to a network. ManagedZonesPerNetwork int64 `json:"managedZonesPerNetwork,omitempty"` @@ -1645,6 +2207,14 @@ type Quota struct { // NetworksPerPolicy: Maximum allowed number of networks per policy. NetworksPerPolicy int64 `json:"networksPerPolicy,omitempty"` + // NetworksPerResponsePolicy: Maximum allowed number of networks per + // response policy. + NetworksPerResponsePolicy int64 `json:"networksPerResponsePolicy,omitempty"` + + // PeeringZonesPerTargetNetwork: Maximum allowed number of consumer + // peering zones per target network owned by this producer project + PeeringZonesPerTargetNetwork int64 `json:"peeringZonesPerTargetNetwork,omitempty"` + // Policies: Maximum allowed number of policies per project. Policies int64 `json:"policies,omitempty"` @@ -1652,6 +2222,14 @@ type Quota struct { // per ResourceRecordSet. ResourceRecordsPerRrset int64 `json:"resourceRecordsPerRrset,omitempty"` + // ResponsePolicies: Maximum allowed number of response policies per + // project. + ResponsePolicies int64 `json:"responsePolicies,omitempty"` + + // ResponsePolicyRulesPerResponsePolicy: Maximum allowed number of rules + // per response policy. + ResponsePolicyRulesPerResponsePolicy int64 `json:"responsePolicyRulesPerResponsePolicy,omitempty"` + // RrsetAdditionsPerChange: Maximum allowed number of ResourceRecordSets // to add per ChangesCreateRequest. RrsetAdditionsPerChange int64 `json:"rrsetAdditionsPerChange,omitempty"` @@ -1705,20 +2283,218 @@ func (s *Quota) MarshalJSON() ([]byte, error) { return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -// ResourceRecordSet: A unit of data that is returned by the DNS -// servers. 
-type ResourceRecordSet struct { - Kind string `json:"kind,omitempty"` +// RRSetRoutingPolicy: A RRSetRoutingPolicy represents ResourceRecordSet +// data that is returned dynamically with the response varying based on +// configured properties such as geolocation or by weighted random +// selection. +type RRSetRoutingPolicy struct { + Geo *RRSetRoutingPolicyGeoPolicy `json:"geo,omitempty"` - // Name: For example, www.example.com. - Name string `json:"name,omitempty"` + Kind string `json:"kind,omitempty"` - // Rrdatas: As defined in RFC 1035 (section 5) and RFC 1034 (section - // 3.6.1) -- see examples. - Rrdatas []string `json:"rrdatas,omitempty"` + Wrr *RRSetRoutingPolicyWrrPolicy `json:"wrr,omitempty"` - // SignatureRrdatas: As defined in RFC 4034 (section 3.2). - SignatureRrdatas []string `json:"signatureRrdatas,omitempty"` + // ForceSendFields is a list of field names (e.g. "Geo") to + // unconditionally include in API requests. By default, fields with + // empty or default values are omitted from API requests. However, any + // non-pointer, non-interface field appearing in ForceSendFields will be + // sent to the server regardless of whether the field is empty or not. + // This may be used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Geo") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *RRSetRoutingPolicy) MarshalJSON() ([]byte, error) { + type NoMethod RRSetRoutingPolicy + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// RRSetRoutingPolicyGeoPolicy: Configures a RRSetRoutingPolicy that +// routes based on the geo location of the querying user. +type RRSetRoutingPolicyGeoPolicy struct { + // Items: The primary geo routing configuration. If there are multiple + // items with the same location, an error is returned instead. + Items []*RRSetRoutingPolicyGeoPolicyGeoPolicyItem `json:"items,omitempty"` + + Kind string `json:"kind,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Items") to + // unconditionally include in API requests. By default, fields with + // empty or default values are omitted from API requests. However, any + // non-pointer, non-interface field appearing in ForceSendFields will be + // sent to the server regardless of whether the field is empty or not. + // This may be used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Items") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. 
+ NullFields []string `json:"-"` +} + +func (s *RRSetRoutingPolicyGeoPolicy) MarshalJSON() ([]byte, error) { + type NoMethod RRSetRoutingPolicyGeoPolicy + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// RRSetRoutingPolicyGeoPolicyGeoPolicyItem: ResourceRecordSet data for +// one geo location. +type RRSetRoutingPolicyGeoPolicyGeoPolicyItem struct { + Kind string `json:"kind,omitempty"` + + // Location: The geo-location granularity is a GCP region. This location + // string should correspond to a GCP region. e.g. "us-east1", + // "southamerica-east1", "asia-east1", etc. + Location string `json:"location,omitempty"` + + Rrdatas []string `json:"rrdatas,omitempty"` + + // SignatureRrdatas: DNSSEC generated signatures for all the rrdata + // within this item. Note that if health checked targets are provided + // for DNSSEC enabled zones, there's a restriction of 1 ip per item. . + SignatureRrdatas []string `json:"signatureRrdatas,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Kind") to + // unconditionally include in API requests. By default, fields with + // empty or default values are omitted from API requests. However, any + // non-pointer, non-interface field appearing in ForceSendFields will be + // sent to the server regardless of whether the field is empty or not. + // This may be used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Kind") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *RRSetRoutingPolicyGeoPolicyGeoPolicyItem) MarshalJSON() ([]byte, error) { + type NoMethod RRSetRoutingPolicyGeoPolicyGeoPolicyItem + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// RRSetRoutingPolicyWrrPolicy: Configures a RRSetRoutingPolicy that +// routes in a weighted round robin fashion. +type RRSetRoutingPolicyWrrPolicy struct { + Items []*RRSetRoutingPolicyWrrPolicyWrrPolicyItem `json:"items,omitempty"` + + Kind string `json:"kind,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Items") to + // unconditionally include in API requests. By default, fields with + // empty or default values are omitted from API requests. However, any + // non-pointer, non-interface field appearing in ForceSendFields will be + // sent to the server regardless of whether the field is empty or not. + // This may be used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Items") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. 
+ NullFields []string `json:"-"` +} + +func (s *RRSetRoutingPolicyWrrPolicy) MarshalJSON() ([]byte, error) { + type NoMethod RRSetRoutingPolicyWrrPolicy + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// RRSetRoutingPolicyWrrPolicyWrrPolicyItem: A routing block which +// contains the routing information for one WRR item. +type RRSetRoutingPolicyWrrPolicyWrrPolicyItem struct { + Kind string `json:"kind,omitempty"` + + Rrdatas []string `json:"rrdatas,omitempty"` + + // SignatureRrdatas: DNSSEC generated signatures for all the rrdata + // within this item. Note that if health checked targets are provided + // for DNSSEC enabled zones, there's a restriction of 1 ip per item. . + SignatureRrdatas []string `json:"signatureRrdatas,omitempty"` + + // Weight: The weight corresponding to this subset of rrdata. When + // multiple WeightedRoundRobinPolicyItems are configured, the + // probability of returning an rrset is proportional to its weight + // relative to the sum of weights configured for all items. This weight + // should be non-negative. + Weight float64 `json:"weight,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Kind") to + // unconditionally include in API requests. By default, fields with + // empty or default values are omitted from API requests. However, any + // non-pointer, non-interface field appearing in ForceSendFields will be + // sent to the server regardless of whether the field is empty or not. + // This may be used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Kind") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *RRSetRoutingPolicyWrrPolicyWrrPolicyItem) MarshalJSON() ([]byte, error) { + type NoMethod RRSetRoutingPolicyWrrPolicyWrrPolicyItem + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +func (s *RRSetRoutingPolicyWrrPolicyWrrPolicyItem) UnmarshalJSON(data []byte) error { + type NoMethod RRSetRoutingPolicyWrrPolicyWrrPolicyItem + var s1 struct { + Weight gensupport.JSONFloat64 `json:"weight"` + *NoMethod + } + s1.NoMethod = (*NoMethod)(s) + if err := json.Unmarshal(data, &s1); err != nil { + return err + } + s.Weight = float64(s1.Weight) + return nil +} + +// ResourceRecordSet: A unit of data that is returned by the DNS +// servers. +type ResourceRecordSet struct { + Kind string `json:"kind,omitempty"` + + // Name: For example, www.example.com. + Name string `json:"name,omitempty"` + + // RoutingPolicy: Configures dynamic query responses based on geo + // location of querying user or a weighted round robin based routing + // policy. A ResourceRecordSet should only have either rrdata (static) + // or routing_policy (dynamic). An error is returned otherwise. + RoutingPolicy *RRSetRoutingPolicy `json:"routingPolicy,omitempty"` + + // Rrdatas: As defined in RFC 1035 (section 5) and RFC 1034 (section + // 3.6.1) -- see examples. + Rrdatas []string `json:"rrdatas,omitempty"` + + // SignatureRrdatas: As defined in RFC 4034 (section 3.2). 
+ SignatureRrdatas []string `json:"signatureRrdatas,omitempty"` // Ttl: Number of seconds that this ResourceRecordSet can be cached by // resolvers. @@ -1832,50 +2608,3006 @@ type ResponseHeader struct { NullFields []string `json:"-"` } -func (s *ResponseHeader) MarshalJSON() ([]byte, error) { - type NoMethod ResponseHeader - raw := NoMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +func (s *ResponseHeader) MarshalJSON() ([]byte, error) { + type NoMethod ResponseHeader + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +type ResponsePoliciesListResponse struct { + Header *ResponseHeader `json:"header,omitempty"` + + // NextPageToken: The presence of this field indicates that there exist + // more results following your last page of results in pagination order. + // To fetch them, make another list request using this value as your + // page token. This lets you the complete contents of even very large + // collections one page at a time. However, if the contents of the + // collection change between the first and last paginated list request, + // the set of all elements returned are an inconsistent view of the + // collection. You cannot retrieve a consistent snapshot of a collection + // larger than the maximum page size. + NextPageToken string `json:"nextPageToken,omitempty"` + + // ResponsePolicies: The Response Policy resources. + ResponsePolicies []*ResponsePolicy `json:"responsePolicies,omitempty"` + + // ServerResponse contains the HTTP response code and headers from the + // server. + googleapi.ServerResponse `json:"-"` + + // ForceSendFields is a list of field names (e.g. "Header") to + // unconditionally include in API requests. By default, fields with + // empty or default values are omitted from API requests. However, any + // non-pointer, non-interface field appearing in ForceSendFields will be + // sent to the server regardless of whether the field is empty or not. + // This may be used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Header") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *ResponsePoliciesListResponse) MarshalJSON() ([]byte, error) { + type NoMethod ResponsePoliciesListResponse + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +type ResponsePoliciesPatchResponse struct { + Header *ResponseHeader `json:"header,omitempty"` + + ResponsePolicy *ResponsePolicy `json:"responsePolicy,omitempty"` + + // ServerResponse contains the HTTP response code and headers from the + // server. + googleapi.ServerResponse `json:"-"` + + // ForceSendFields is a list of field names (e.g. "Header") to + // unconditionally include in API requests. By default, fields with + // empty or default values are omitted from API requests. However, any + // non-pointer, non-interface field appearing in ForceSendFields will be + // sent to the server regardless of whether the field is empty or not. + // This may be used to include empty fields in Patch requests. 
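To see how the new routing-policy types compose with the `RoutingPolicy` field on `ResourceRecordSet`, here is a minimal sketch of a weighted record set. The domain name, addresses and weights are placeholders, and the `Type`/`Ttl` fields used below belong to the full `ResourceRecordSet` definition outside this excerpt.

```go
package main

import (
	"encoding/json"
	"fmt"

	dns "google.golang.org/api/dns/v1"
)

func main() {
	// A weighted-round-robin record set: roughly 90% of responses resolve
	// to the first address, 10% to the second. Rrdatas on the record set
	// itself stays empty, because per the field documentation a record set
	// may carry either static rrdata or a routing policy, not both.
	rrset := &dns.ResourceRecordSet{
		Name: "my.service.example.com.",
		Type: "A",
		Ttl:  120,
		RoutingPolicy: &dns.RRSetRoutingPolicy{
			Wrr: &dns.RRSetRoutingPolicyWrrPolicy{
				Items: []*dns.RRSetRoutingPolicyWrrPolicyWrrPolicyItem{
					{Weight: 90, Rrdatas: []string{"1.2.3.4"}},
					{Weight: 10, Rrdatas: []string{"5.6.7.8"}},
				},
			},
		},
	}

	b, _ := json.MarshalIndent(rrset, "", "  ")
	fmt.Println(string(b))
}
```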
+ ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Header") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *ResponsePoliciesPatchResponse) MarshalJSON() ([]byte, error) { + type NoMethod ResponsePoliciesPatchResponse + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +type ResponsePoliciesUpdateResponse struct { + Header *ResponseHeader `json:"header,omitempty"` + + ResponsePolicy *ResponsePolicy `json:"responsePolicy,omitempty"` + + // ServerResponse contains the HTTP response code and headers from the + // server. + googleapi.ServerResponse `json:"-"` + + // ForceSendFields is a list of field names (e.g. "Header") to + // unconditionally include in API requests. By default, fields with + // empty or default values are omitted from API requests. However, any + // non-pointer, non-interface field appearing in ForceSendFields will be + // sent to the server regardless of whether the field is empty or not. + // This may be used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Header") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *ResponsePoliciesUpdateResponse) MarshalJSON() ([]byte, error) { + type NoMethod ResponsePoliciesUpdateResponse + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// ResponsePolicy: A Response Policy is a collection of selectors that +// apply to queries made against one or more Virtual Private Cloud +// networks. +type ResponsePolicy struct { + // Description: User-provided description for this Response Policy. + Description string `json:"description,omitempty"` + + // Id: Unique identifier for the resource; defined by the server (output + // only). + Id int64 `json:"id,omitempty,string"` + + Kind string `json:"kind,omitempty"` + + // Networks: List of network names specifying networks to which this + // policy is applied. + Networks []*ResponsePolicyNetwork `json:"networks,omitempty"` + + // ResponsePolicyName: User assigned name for this Response Policy. + ResponsePolicyName string `json:"responsePolicyName,omitempty"` + + // ServerResponse contains the HTTP response code and headers from the + // server. + googleapi.ServerResponse `json:"-"` + + // ForceSendFields is a list of field names (e.g. "Description") to + // unconditionally include in API requests. By default, fields with + // empty or default values are omitted from API requests. However, any + // non-pointer, non-interface field appearing in ForceSendFields will be + // sent to the server regardless of whether the field is empty or not. + // This may be used to include empty fields in Patch requests. 
+ ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Description") to include + // in API requests with the JSON null value. By default, fields with + // empty values are omitted from API requests. However, any field with + // an empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *ResponsePolicy) MarshalJSON() ([]byte, error) { + type NoMethod ResponsePolicy + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +type ResponsePolicyNetwork struct { + Kind string `json:"kind,omitempty"` + + // NetworkUrl: The fully qualified URL of the VPC network to bind to. + // This should be formatted like + // https://www.googleapis.com/compute/v1/projects/{project}/global/networks/{network} + NetworkUrl string `json:"networkUrl,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Kind") to + // unconditionally include in API requests. By default, fields with + // empty or default values are omitted from API requests. However, any + // non-pointer, non-interface field appearing in ForceSendFields will be + // sent to the server regardless of whether the field is empty or not. + // This may be used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Kind") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *ResponsePolicyNetwork) MarshalJSON() ([]byte, error) { + type NoMethod ResponsePolicyNetwork + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// ResponsePolicyRule: A Response Policy Rule is a selector that applies +// its behavior to queries that match the selector. Selectors are DNS +// names, which may be wildcards or exact matches. Each DNS query +// subject to a Response Policy matches at most one ResponsePolicyRule, +// as identified by the dns_name field with the longest matching suffix. +type ResponsePolicyRule struct { + // Behavior: Answer this query with a behavior rather than DNS data. + // + // Possible values: + // "behaviorUnspecified" + // "bypassResponsePolicy" - Skip a less-specific ResponsePolicyRule + // and continue normal query logic. This can be used with a + // less-specific wildcard selector to exempt a subset of the wildcard + // ResponsePolicyRule from the ResponsePolicy behavior and query the + // public Internet instead. For instance, if these rules exist: + // *.example.com -> LocalData 1.2.3.4 foo.example.com -> Behavior + // 'bypassResponsePolicy' Then a query for 'foo.example.com' skips the + // wildcard. This additionally functions to facilitate the allowlist + // feature. RPZs can be applied to multiple levels in the (eventually + // org, folder, project, network) hierarchy. 
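As a small illustration of how a `ResponsePolicy` is bound to a VPC network through `ResponsePolicyNetwork` (a sketch only; project and network names are placeholders, and the create/patch calls that accept this resource live elsewhere in the generated client):

```go
package main

import (
	"fmt"

	dns "google.golang.org/api/dns/v1"
)

func main() {
	// A response policy attached to a single VPC network.
	rp := &dns.ResponsePolicy{
		ResponsePolicyName: "example-response-policy",
		Description:        "Overrides selected names inside the VPC",
		Networks: []*dns.ResponsePolicyNetwork{
			{NetworkUrl: "https://www.googleapis.com/compute/v1/projects/my-project/global/networks/default"},
		},
	}
	fmt.Println(rp.ResponsePolicyName)
}
```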
If a rule is applied at a + // higher level of the hierarchy, adding a passthru rule at a lower + // level will supersede that, and a query from an affected vm to that + // domain will be exempt from the RPZ and proceed to normal resolution + // behavior. + Behavior string `json:"behavior,omitempty"` + + // DnsName: The DNS name (wildcard or exact) to apply this rule to. Must + // be unique within the Response Policy Rule. + DnsName string `json:"dnsName,omitempty"` + + Kind string `json:"kind,omitempty"` + + // LocalData: Answer this query directly with DNS data. These + // ResourceRecordSets override any other DNS behavior for the matched + // name; in particular they override private zones, the public internet, + // and GCP internal DNS. No SOA nor NS types are allowed. + LocalData *ResponsePolicyRuleLocalData `json:"localData,omitempty"` + + // RuleName: An identifier for this rule. Must be unique with the + // ResponsePolicy. + RuleName string `json:"ruleName,omitempty"` + + // ServerResponse contains the HTTP response code and headers from the + // server. + googleapi.ServerResponse `json:"-"` + + // ForceSendFields is a list of field names (e.g. "Behavior") to + // unconditionally include in API requests. By default, fields with + // empty or default values are omitted from API requests. However, any + // non-pointer, non-interface field appearing in ForceSendFields will be + // sent to the server regardless of whether the field is empty or not. + // This may be used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Behavior") to include in + // API requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *ResponsePolicyRule) MarshalJSON() ([]byte, error) { + type NoMethod ResponsePolicyRule + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +type ResponsePolicyRuleLocalData struct { + // LocalDatas: All resource record sets for this selector, one per + // resource record type. The name must match the dns_name. + LocalDatas []*ResourceRecordSet `json:"localDatas,omitempty"` + + // ForceSendFields is a list of field names (e.g. "LocalDatas") to + // unconditionally include in API requests. By default, fields with + // empty or default values are omitted from API requests. However, any + // non-pointer, non-interface field appearing in ForceSendFields will be + // sent to the server regardless of whether the field is empty or not. + // This may be used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "LocalDatas") to include in + // API requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. 
+ NullFields []string `json:"-"` +} + +func (s *ResponsePolicyRuleLocalData) MarshalJSON() ([]byte, error) { + type NoMethod ResponsePolicyRuleLocalData + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +type ResponsePolicyRulesListResponse struct { + Header *ResponseHeader `json:"header,omitempty"` + + // NextPageToken: The presence of this field indicates that there exist + // more results following your last page of results in pagination order. + // To fetch them, make another list request using this value as your + // page token. This lets you the complete contents of even very large + // collections one page at a time. However, if the contents of the + // collection change between the first and last paginated list request, + // the set of all elements returned are an inconsistent view of the + // collection. You cannot retrieve a consistent snapshot of a collection + // larger than the maximum page size. + NextPageToken string `json:"nextPageToken,omitempty"` + + // ResponsePolicyRules: The Response Policy Rule resources. + ResponsePolicyRules []*ResponsePolicyRule `json:"responsePolicyRules,omitempty"` + + // ServerResponse contains the HTTP response code and headers from the + // server. + googleapi.ServerResponse `json:"-"` + + // ForceSendFields is a list of field names (e.g. "Header") to + // unconditionally include in API requests. By default, fields with + // empty or default values are omitted from API requests. However, any + // non-pointer, non-interface field appearing in ForceSendFields will be + // sent to the server regardless of whether the field is empty or not. + // This may be used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Header") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *ResponsePolicyRulesListResponse) MarshalJSON() ([]byte, error) { + type NoMethod ResponsePolicyRulesListResponse + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +type ResponsePolicyRulesPatchResponse struct { + Header *ResponseHeader `json:"header,omitempty"` + + ResponsePolicyRule *ResponsePolicyRule `json:"responsePolicyRule,omitempty"` + + // ServerResponse contains the HTTP response code and headers from the + // server. + googleapi.ServerResponse `json:"-"` + + // ForceSendFields is a list of field names (e.g. "Header") to + // unconditionally include in API requests. By default, fields with + // empty or default values are omitted from API requests. However, any + // non-pointer, non-interface field appearing in ForceSendFields will be + // sent to the server regardless of whether the field is empty or not. + // This may be used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Header") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. 
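A sketch of a `ResponsePolicyRule` that answers a matching name with local data instead of the public answer. Names, TTL and address are placeholders; alternatively, `Behavior` could be set to `"bypassResponsePolicy"` instead of providing `LocalData`.

```go
package main

import (
	"fmt"

	dns "google.golang.org/api/dns/v1"
)

func main() {
	// Queries for this exact name are answered with the local record set
	// below, overriding private zones and the public internet.
	rule := &dns.ResponsePolicyRule{
		RuleName: "override-internal-api",
		DnsName:  "api.internal.example.com.",
		LocalData: &dns.ResponsePolicyRuleLocalData{
			LocalDatas: []*dns.ResourceRecordSet{
				{Name: "api.internal.example.com.", Type: "A", Ttl: 300, Rrdatas: []string{"10.0.0.10"}},
			},
		},
	}
	fmt.Println(rule.DnsName)
}
```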
It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *ResponsePolicyRulesPatchResponse) MarshalJSON() ([]byte, error) { + type NoMethod ResponsePolicyRulesPatchResponse + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +type ResponsePolicyRulesUpdateResponse struct { + Header *ResponseHeader `json:"header,omitempty"` + + ResponsePolicyRule *ResponsePolicyRule `json:"responsePolicyRule,omitempty"` + + // ServerResponse contains the HTTP response code and headers from the + // server. + googleapi.ServerResponse `json:"-"` + + // ForceSendFields is a list of field names (e.g. "Header") to + // unconditionally include in API requests. By default, fields with + // empty or default values are omitted from API requests. However, any + // non-pointer, non-interface field appearing in ForceSendFields will be + // sent to the server regardless of whether the field is empty or not. + // This may be used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Header") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *ResponsePolicyRulesUpdateResponse) MarshalJSON() ([]byte, error) { + type NoMethod ResponsePolicyRulesUpdateResponse + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// method id "dns.changes.create": + +type ChangesCreateCall struct { + s *Service + project string + managedZone string + change *Change + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// Create: Atomically updates the ResourceRecordSet collection. +// +// - managedZone: Identifies the managed zone addressed by this request. +// Can be the managed zone name or ID. +// - project: Identifies the project addressed by this request. +func (r *ChangesService) Create(project string, managedZone string, change *Change) *ChangesCreateCall { + c := &ChangesCreateCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.managedZone = managedZone + c.change = change + return c +} + +// ClientOperationId sets the optional parameter "clientOperationId": +// For mutating operation requests only. An optional identifier +// specified by the client. Must be unique for operation resources in +// the Operations collection. +func (c *ChangesCreateCall) ClientOperationId(clientOperationId string) *ChangesCreateCall { + c.urlParams_.Set("clientOperationId", clientOperationId) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *ChangesCreateCall) Fields(s ...googleapi.Field) *ChangesCreateCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. 
+func (c *ChangesCreateCall) Context(ctx context.Context) *ChangesCreateCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *ChangesCreateCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *ChangesCreateCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.change) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "dns/v1/projects/{project}/managedZones/{managedZone}/changes") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("POST", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + "managedZone": c.managedZone, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "dns.changes.create" call. +// Exactly one of *Change or error will be non-nil. Any non-2xx status +// code is an error. Response headers are in either +// *Change.ServerResponse.Header or (if a response was returned at all) +// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to +// check whether the returned error was because http.StatusNotModified +// was returned. +func (c *ChangesCreateCall) Do(opts ...googleapi.CallOption) (*Change, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &Change{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Atomically updates the ResourceRecordSet collection.", + // "flatPath": "dns/v1/projects/{project}/managedZones/{managedZone}/changes", + // "httpMethod": "POST", + // "id": "dns.changes.create", + // "parameterOrder": [ + // "project", + // "managedZone" + // ], + // "parameters": { + // "clientOperationId": { + // "description": "For mutating operation requests only. An optional identifier specified by the client. Must be unique for operation resources in the Operations collection.", + // "location": "query", + // "type": "string" + // }, + // "managedZone": { + // "description": "Identifies the managed zone addressed by this request. 
Can be the managed zone name or ID.", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "project": { + // "description": "Identifies the project addressed by this request.", + // "location": "path", + // "required": true, + // "type": "string" + // } + // }, + // "path": "dns/v1/projects/{project}/managedZones/{managedZone}/changes", + // "request": { + // "$ref": "Change" + // }, + // "response": { + // "$ref": "Change" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/ndev.clouddns.readwrite" + // ] + // } + +} + +// method id "dns.changes.get": + +type ChangesGetCall struct { + s *Service + project string + managedZone string + changeId string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header +} + +// Get: Fetches the representation of an existing Change. +// +// - changeId: The identifier of the requested change, from a previous +// ResourceRecordSetsChangeResponse. +// - managedZone: Identifies the managed zone addressed by this request. +// Can be the managed zone name or ID. +// - project: Identifies the project addressed by this request. +func (r *ChangesService) Get(project string, managedZone string, changeId string) *ChangesGetCall { + c := &ChangesGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.managedZone = managedZone + c.changeId = changeId + return c +} + +// ClientOperationId sets the optional parameter "clientOperationId": +// For mutating operation requests only. An optional identifier +// specified by the client. Must be unique for operation resources in +// the Operations collection. +func (c *ChangesGetCall) ClientOperationId(clientOperationId string) *ChangesGetCall { + c.urlParams_.Set("clientOperationId", clientOperationId) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *ChangesGetCall) Fields(s ...googleapi.Field) *ChangesGetCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. This is useful for +// getting updates only after the object has changed since the last +// request. Use googleapi.IsNotModified to check whether the response +// error from Do is the result of In-None-Match. +func (c *ChangesGetCall) IfNoneMatch(entityTag string) *ChangesGetCall { + c.ifNoneMatch_ = entityTag + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *ChangesGetCall) Context(ctx context.Context) *ChangesGetCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. 
+func (c *ChangesGetCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *ChangesGetCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) + } + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "dns/v1/projects/{project}/managedZones/{managedZone}/changes/{changeId}") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("GET", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + "managedZone": c.managedZone, + "changeId": c.changeId, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "dns.changes.get" call. +// Exactly one of *Change or error will be non-nil. Any non-2xx status +// code is an error. Response headers are in either +// *Change.ServerResponse.Header or (if a response was returned at all) +// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to +// check whether the returned error was because http.StatusNotModified +// was returned. +func (c *ChangesGetCall) Do(opts ...googleapi.CallOption) (*Change, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &Change{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Fetches the representation of an existing Change.", + // "flatPath": "dns/v1/projects/{project}/managedZones/{managedZone}/changes/{changeId}", + // "httpMethod": "GET", + // "id": "dns.changes.get", + // "parameterOrder": [ + // "project", + // "managedZone", + // "changeId" + // ], + // "parameters": { + // "changeId": { + // "description": "The identifier of the requested change, from a previous ResourceRecordSetsChangeResponse.", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "clientOperationId": { + // "description": "For mutating operation requests only. An optional identifier specified by the client. Must be unique for operation resources in the Operations collection.", + // "location": "query", + // "type": "string" + // }, + // "managedZone": { + // "description": "Identifies the managed zone addressed by this request. 
Can be the managed zone name or ID.", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "project": { + // "description": "Identifies the project addressed by this request.", + // "location": "path", + // "required": true, + // "type": "string" + // } + // }, + // "path": "dns/v1/projects/{project}/managedZones/{managedZone}/changes/{changeId}", + // "response": { + // "$ref": "Change" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/cloud-platform.read-only", + // "https://www.googleapis.com/auth/ndev.clouddns.readonly", + // "https://www.googleapis.com/auth/ndev.clouddns.readwrite" + // ] + // } + +} + +// method id "dns.changes.list": + +type ChangesListCall struct { + s *Service + project string + managedZone string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header +} + +// List: Enumerates Changes to a ResourceRecordSet collection. +// +// - managedZone: Identifies the managed zone addressed by this request. +// Can be the managed zone name or ID. +// - project: Identifies the project addressed by this request. +func (r *ChangesService) List(project string, managedZone string) *ChangesListCall { + c := &ChangesListCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.managedZone = managedZone + return c +} + +// MaxResults sets the optional parameter "maxResults": Maximum number +// of results to be returned. If unspecified, the server decides how +// many results to return. +func (c *ChangesListCall) MaxResults(maxResults int64) *ChangesListCall { + c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) + return c +} + +// PageToken sets the optional parameter "pageToken": A tag returned by +// a previous list request that was truncated. Use this parameter to +// continue a previous list request. +func (c *ChangesListCall) PageToken(pageToken string) *ChangesListCall { + c.urlParams_.Set("pageToken", pageToken) + return c +} + +// SortBy sets the optional parameter "sortBy": Sorting criterion. The +// only supported value is change sequence. +// +// Possible values: +// "changeSequence" (default) +func (c *ChangesListCall) SortBy(sortBy string) *ChangesListCall { + c.urlParams_.Set("sortBy", sortBy) + return c +} + +// SortOrder sets the optional parameter "sortOrder": Sorting order +// direction: 'ascending' or 'descending'. +func (c *ChangesListCall) SortOrder(sortOrder string) *ChangesListCall { + c.urlParams_.Set("sortOrder", sortOrder) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *ChangesListCall) Fields(s ...googleapi.Field) *ChangesListCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. This is useful for +// getting updates only after the object has changed since the last +// request. Use googleapi.IsNotModified to check whether the response +// error from Do is the result of In-None-Match. +func (c *ChangesListCall) IfNoneMatch(entityTag string) *ChangesListCall { + c.ifNoneMatch_ = entityTag + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. 
+func (c *ChangesListCall) Context(ctx context.Context) *ChangesListCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *ChangesListCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *ChangesListCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) + } + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "dns/v1/projects/{project}/managedZones/{managedZone}/changes") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("GET", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + "managedZone": c.managedZone, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "dns.changes.list" call. +// Exactly one of *ChangesListResponse or error will be non-nil. Any +// non-2xx status code is an error. Response headers are in either +// *ChangesListResponse.ServerResponse.Header or (if a response was +// returned at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *ChangesListCall) Do(opts ...googleapi.CallOption) (*ChangesListResponse, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &ChangesListResponse{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Enumerates Changes to a ResourceRecordSet collection.", + // "flatPath": "dns/v1/projects/{project}/managedZones/{managedZone}/changes", + // "httpMethod": "GET", + // "id": "dns.changes.list", + // "parameterOrder": [ + // "project", + // "managedZone" + // ], + // "parameters": { + // "managedZone": { + // "description": "Identifies the managed zone addressed by this request. Can be the managed zone name or ID.", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "maxResults": { + // "description": "Optional. Maximum number of results to be returned. If unspecified, the server decides how many results to return.", + // "format": "int32", + // "location": "query", + // "type": "integer" + // }, + // "pageToken": { + // "description": "Optional. A tag returned by a previous list request that was truncated. 
Use this parameter to continue a previous list request.", + // "location": "query", + // "type": "string" + // }, + // "project": { + // "description": "Identifies the project addressed by this request.", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "sortBy": { + // "default": "changeSequence", + // "description": "Sorting criterion. The only supported value is change sequence.", + // "enum": [ + // "changeSequence" + // ], + // "enumDescriptions": [ + // "" + // ], + // "location": "query", + // "type": "string" + // }, + // "sortOrder": { + // "description": "Sorting order direction: 'ascending' or 'descending'.", + // "location": "query", + // "type": "string" + // } + // }, + // "path": "dns/v1/projects/{project}/managedZones/{managedZone}/changes", + // "response": { + // "$ref": "ChangesListResponse" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/cloud-platform.read-only", + // "https://www.googleapis.com/auth/ndev.clouddns.readonly", + // "https://www.googleapis.com/auth/ndev.clouddns.readwrite" + // ] + // } + +} + +// Pages invokes f for each page of results. +// A non-nil error returned from f will halt the iteration. +// The provided context supersedes any context provided to the Context method. +func (c *ChangesListCall) Pages(ctx context.Context, f func(*ChangesListResponse) error) error { + c.ctx_ = ctx + defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point + for { + x, err := c.Do() + if err != nil { + return err + } + if err := f(x); err != nil { + return err + } + if x.NextPageToken == "" { + return nil + } + c.PageToken(x.NextPageToken) + } +} + +// method id "dns.dnsKeys.get": + +type DnsKeysGetCall struct { + s *Service + project string + managedZone string + dnsKeyId string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header +} + +// Get: Fetches the representation of an existing DnsKey. +// +// - dnsKeyId: The identifier of the requested DnsKey. +// - managedZone: Identifies the managed zone addressed by this request. +// Can be the managed zone name or ID. +// - project: Identifies the project addressed by this request. +func (r *DnsKeysService) Get(project string, managedZone string, dnsKeyId string) *DnsKeysGetCall { + c := &DnsKeysGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.managedZone = managedZone + c.dnsKeyId = dnsKeyId + return c +} + +// ClientOperationId sets the optional parameter "clientOperationId": +// For mutating operation requests only. An optional identifier +// specified by the client. Must be unique for operation resources in +// the Operations collection. +func (c *DnsKeysGetCall) ClientOperationId(clientOperationId string) *DnsKeysGetCall { + c.urlParams_.Set("clientOperationId", clientOperationId) + return c +} + +// DigestType sets the optional parameter "digestType": An optional +// comma-separated list of digest types to compute and display for key +// signing keys. If omitted, the recommended digest type is computed and +// displayed. +func (c *DnsKeysGetCall) DigestType(digestType string) *DnsKeysGetCall { + c.urlParams_.Set("digestType", digestType) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. 
+func (c *DnsKeysGetCall) Fields(s ...googleapi.Field) *DnsKeysGetCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. This is useful for +// getting updates only after the object has changed since the last +// request. Use googleapi.IsNotModified to check whether the response +// error from Do is the result of In-None-Match. +func (c *DnsKeysGetCall) IfNoneMatch(entityTag string) *DnsKeysGetCall { + c.ifNoneMatch_ = entityTag + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *DnsKeysGetCall) Context(ctx context.Context) *DnsKeysGetCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *DnsKeysGetCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *DnsKeysGetCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) + } + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "dns/v1/projects/{project}/managedZones/{managedZone}/dnsKeys/{dnsKeyId}") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("GET", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + "managedZone": c.managedZone, + "dnsKeyId": c.dnsKeyId, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "dns.dnsKeys.get" call. +// Exactly one of *DnsKey or error will be non-nil. Any non-2xx status +// code is an error. Response headers are in either +// *DnsKey.ServerResponse.Header or (if a response was returned at all) +// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to +// check whether the returned error was because http.StatusNotModified +// was returned. +func (c *DnsKeysGetCall) Do(opts ...googleapi.CallOption) (*DnsKey, error) { + gensupport.SetOptions(c.urlParams_, opts...) 
+ res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &DnsKey{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Fetches the representation of an existing DnsKey.", + // "flatPath": "dns/v1/projects/{project}/managedZones/{managedZone}/dnsKeys/{dnsKeyId}", + // "httpMethod": "GET", + // "id": "dns.dnsKeys.get", + // "parameterOrder": [ + // "project", + // "managedZone", + // "dnsKeyId" + // ], + // "parameters": { + // "clientOperationId": { + // "description": "For mutating operation requests only. An optional identifier specified by the client. Must be unique for operation resources in the Operations collection.", + // "location": "query", + // "type": "string" + // }, + // "digestType": { + // "description": "An optional comma-separated list of digest types to compute and display for key signing keys. If omitted, the recommended digest type is computed and displayed.", + // "location": "query", + // "type": "string" + // }, + // "dnsKeyId": { + // "description": "The identifier of the requested DnsKey.", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "managedZone": { + // "description": "Identifies the managed zone addressed by this request. Can be the managed zone name or ID.", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "project": { + // "description": "Identifies the project addressed by this request.", + // "location": "path", + // "required": true, + // "type": "string" + // } + // }, + // "path": "dns/v1/projects/{project}/managedZones/{managedZone}/dnsKeys/{dnsKeyId}", + // "response": { + // "$ref": "DnsKey" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/cloud-platform.read-only", + // "https://www.googleapis.com/auth/ndev.clouddns.readonly", + // "https://www.googleapis.com/auth/ndev.clouddns.readwrite" + // ] + // } + +} + +// method id "dns.dnsKeys.list": + +type DnsKeysListCall struct { + s *Service + project string + managedZone string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header +} + +// List: Enumerates DnsKeys to a ResourceRecordSet collection. +// +// - managedZone: Identifies the managed zone addressed by this request. +// Can be the managed zone name or ID. +// - project: Identifies the project addressed by this request. +func (r *DnsKeysService) List(project string, managedZone string) *DnsKeysListCall { + c := &DnsKeysListCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.managedZone = managedZone + return c +} + +// DigestType sets the optional parameter "digestType": An optional +// comma-separated list of digest types to compute and display for key +// signing keys. If omitted, the recommended digest type is computed and +// displayed. 
+func (c *DnsKeysListCall) DigestType(digestType string) *DnsKeysListCall { + c.urlParams_.Set("digestType", digestType) + return c +} + +// MaxResults sets the optional parameter "maxResults": Maximum number +// of results to be returned. If unspecified, the server decides how +// many results to return. +func (c *DnsKeysListCall) MaxResults(maxResults int64) *DnsKeysListCall { + c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) + return c +} + +// PageToken sets the optional parameter "pageToken": A tag returned by +// a previous list request that was truncated. Use this parameter to +// continue a previous list request. +func (c *DnsKeysListCall) PageToken(pageToken string) *DnsKeysListCall { + c.urlParams_.Set("pageToken", pageToken) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *DnsKeysListCall) Fields(s ...googleapi.Field) *DnsKeysListCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. This is useful for +// getting updates only after the object has changed since the last +// request. Use googleapi.IsNotModified to check whether the response +// error from Do is the result of In-None-Match. +func (c *DnsKeysListCall) IfNoneMatch(entityTag string) *DnsKeysListCall { + c.ifNoneMatch_ = entityTag + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *DnsKeysListCall) Context(ctx context.Context) *DnsKeysListCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *DnsKeysListCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *DnsKeysListCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) + } + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "dns/v1/projects/{project}/managedZones/{managedZone}/dnsKeys") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("GET", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + "managedZone": c.managedZone, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "dns.dnsKeys.list" call. +// Exactly one of *DnsKeysListResponse or error will be non-nil. Any +// non-2xx status code is an error. Response headers are in either +// *DnsKeysListResponse.ServerResponse.Header or (if a response was +// returned at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *DnsKeysListCall) Do(opts ...googleapi.CallOption) (*DnsKeysListResponse, error) { + gensupport.SetOptions(c.urlParams_, opts...) 
+ res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &DnsKeysListResponse{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Enumerates DnsKeys to a ResourceRecordSet collection.", + // "flatPath": "dns/v1/projects/{project}/managedZones/{managedZone}/dnsKeys", + // "httpMethod": "GET", + // "id": "dns.dnsKeys.list", + // "parameterOrder": [ + // "project", + // "managedZone" + // ], + // "parameters": { + // "digestType": { + // "description": "An optional comma-separated list of digest types to compute and display for key signing keys. If omitted, the recommended digest type is computed and displayed.", + // "location": "query", + // "type": "string" + // }, + // "managedZone": { + // "description": "Identifies the managed zone addressed by this request. Can be the managed zone name or ID.", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "maxResults": { + // "description": "Optional. Maximum number of results to be returned. If unspecified, the server decides how many results to return.", + // "format": "int32", + // "location": "query", + // "type": "integer" + // }, + // "pageToken": { + // "description": "Optional. A tag returned by a previous list request that was truncated. Use this parameter to continue a previous list request.", + // "location": "query", + // "type": "string" + // }, + // "project": { + // "description": "Identifies the project addressed by this request.", + // "location": "path", + // "required": true, + // "type": "string" + // } + // }, + // "path": "dns/v1/projects/{project}/managedZones/{managedZone}/dnsKeys", + // "response": { + // "$ref": "DnsKeysListResponse" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/cloud-platform.read-only", + // "https://www.googleapis.com/auth/ndev.clouddns.readonly", + // "https://www.googleapis.com/auth/ndev.clouddns.readwrite" + // ] + // } + +} + +// Pages invokes f for each page of results. +// A non-nil error returned from f will halt the iteration. +// The provided context supersedes any context provided to the Context method. +func (c *DnsKeysListCall) Pages(ctx context.Context, f func(*DnsKeysListResponse) error) error { + c.ctx_ = ctx + defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point + for { + x, err := c.Do() + if err != nil { + return err + } + if err := f(x); err != nil { + return err + } + if x.NextPageToken == "" { + return nil + } + c.PageToken(x.NextPageToken) + } +} + +// method id "dns.managedZoneOperations.get": + +type ManagedZoneOperationsGetCall struct { + s *Service + project string + managedZone string + operation string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header +} + +// Get: Fetches the representation of an existing Operation. +// +// - managedZone: Identifies the managed zone addressed by this request. 
+// - operation: Identifies the operation addressed by this request (ID +// of the operation). +// - project: Identifies the project addressed by this request. +func (r *ManagedZoneOperationsService) Get(project string, managedZone string, operation string) *ManagedZoneOperationsGetCall { + c := &ManagedZoneOperationsGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.managedZone = managedZone + c.operation = operation + return c +} + +// ClientOperationId sets the optional parameter "clientOperationId": +// For mutating operation requests only. An optional identifier +// specified by the client. Must be unique for operation resources in +// the Operations collection. +func (c *ManagedZoneOperationsGetCall) ClientOperationId(clientOperationId string) *ManagedZoneOperationsGetCall { + c.urlParams_.Set("clientOperationId", clientOperationId) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *ManagedZoneOperationsGetCall) Fields(s ...googleapi.Field) *ManagedZoneOperationsGetCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. This is useful for +// getting updates only after the object has changed since the last +// request. Use googleapi.IsNotModified to check whether the response +// error from Do is the result of In-None-Match. +func (c *ManagedZoneOperationsGetCall) IfNoneMatch(entityTag string) *ManagedZoneOperationsGetCall { + c.ifNoneMatch_ = entityTag + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *ManagedZoneOperationsGetCall) Context(ctx context.Context) *ManagedZoneOperationsGetCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *ManagedZoneOperationsGetCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *ManagedZoneOperationsGetCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) + } + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "dns/v1/projects/{project}/managedZones/{managedZone}/operations/{operation}") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("GET", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + "managedZone": c.managedZone, + "operation": c.operation, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "dns.managedZoneOperations.get" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. 
Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *ManagedZoneOperationsGetCall) Do(opts ...googleapi.CallOption) (*Operation, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &Operation{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Fetches the representation of an existing Operation.", + // "flatPath": "dns/v1/projects/{project}/managedZones/{managedZone}/operations/{operation}", + // "httpMethod": "GET", + // "id": "dns.managedZoneOperations.get", + // "parameterOrder": [ + // "project", + // "managedZone", + // "operation" + // ], + // "parameters": { + // "clientOperationId": { + // "description": "For mutating operation requests only. An optional identifier specified by the client. Must be unique for operation resources in the Operations collection.", + // "location": "query", + // "type": "string" + // }, + // "managedZone": { + // "description": "Identifies the managed zone addressed by this request.", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "operation": { + // "description": "Identifies the operation addressed by this request (ID of the operation).", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "project": { + // "description": "Identifies the project addressed by this request.", + // "location": "path", + // "required": true, + // "type": "string" + // } + // }, + // "path": "dns/v1/projects/{project}/managedZones/{managedZone}/operations/{operation}", + // "response": { + // "$ref": "Operation" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/cloud-platform.read-only", + // "https://www.googleapis.com/auth/ndev.clouddns.readonly", + // "https://www.googleapis.com/auth/ndev.clouddns.readwrite" + // ] + // } + +} + +// method id "dns.managedZoneOperations.list": + +type ManagedZoneOperationsListCall struct { + s *Service + project string + managedZone string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header +} + +// List: Enumerates Operations for the given ManagedZone. +// +// - managedZone: Identifies the managed zone addressed by this request. +// - project: Identifies the project addressed by this request. +func (r *ManagedZoneOperationsService) List(project string, managedZone string) *ManagedZoneOperationsListCall { + c := &ManagedZoneOperationsListCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.managedZone = managedZone + return c +} + +// MaxResults sets the optional parameter "maxResults": Maximum number +// of results to be returned. If unspecified, the server decides how +// many results to return. 
+func (c *ManagedZoneOperationsListCall) MaxResults(maxResults int64) *ManagedZoneOperationsListCall { + c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) + return c +} + +// PageToken sets the optional parameter "pageToken": A tag returned by +// a previous list request that was truncated. Use this parameter to +// continue a previous list request. +func (c *ManagedZoneOperationsListCall) PageToken(pageToken string) *ManagedZoneOperationsListCall { + c.urlParams_.Set("pageToken", pageToken) + return c +} + +// SortBy sets the optional parameter "sortBy": Sorting criterion. The +// only supported values are START_TIME and ID. +// +// Possible values: +// "startTime" (default) +// "id" +func (c *ManagedZoneOperationsListCall) SortBy(sortBy string) *ManagedZoneOperationsListCall { + c.urlParams_.Set("sortBy", sortBy) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *ManagedZoneOperationsListCall) Fields(s ...googleapi.Field) *ManagedZoneOperationsListCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. This is useful for +// getting updates only after the object has changed since the last +// request. Use googleapi.IsNotModified to check whether the response +// error from Do is the result of In-None-Match. +func (c *ManagedZoneOperationsListCall) IfNoneMatch(entityTag string) *ManagedZoneOperationsListCall { + c.ifNoneMatch_ = entityTag + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *ManagedZoneOperationsListCall) Context(ctx context.Context) *ManagedZoneOperationsListCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *ManagedZoneOperationsListCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *ManagedZoneOperationsListCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) + } + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "dns/v1/projects/{project}/managedZones/{managedZone}/operations") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("GET", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + "managedZone": c.managedZone, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "dns.managedZoneOperations.list" call. +// Exactly one of *ManagedZoneOperationsListResponse or error will be +// non-nil. Any non-2xx status code is an error. Response headers are in +// either *ManagedZoneOperationsListResponse.ServerResponse.Header or +// (if a response was returned at all) in +// error.(*googleapi.Error).Header. 
Use googleapi.IsNotModified to check +// whether the returned error was because http.StatusNotModified was +// returned. +func (c *ManagedZoneOperationsListCall) Do(opts ...googleapi.CallOption) (*ManagedZoneOperationsListResponse, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &ManagedZoneOperationsListResponse{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Enumerates Operations for the given ManagedZone.", + // "flatPath": "dns/v1/projects/{project}/managedZones/{managedZone}/operations", + // "httpMethod": "GET", + // "id": "dns.managedZoneOperations.list", + // "parameterOrder": [ + // "project", + // "managedZone" + // ], + // "parameters": { + // "managedZone": { + // "description": "Identifies the managed zone addressed by this request.", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "maxResults": { + // "description": "Optional. Maximum number of results to be returned. If unspecified, the server decides how many results to return.", + // "format": "int32", + // "location": "query", + // "type": "integer" + // }, + // "pageToken": { + // "description": "Optional. A tag returned by a previous list request that was truncated. Use this parameter to continue a previous list request.", + // "location": "query", + // "type": "string" + // }, + // "project": { + // "description": "Identifies the project addressed by this request.", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "sortBy": { + // "default": "startTime", + // "description": "Sorting criterion. The only supported values are START_TIME and ID.", + // "enum": [ + // "startTime", + // "id" + // ], + // "enumDescriptions": [ + // "", + // "" + // ], + // "location": "query", + // "type": "string" + // } + // }, + // "path": "dns/v1/projects/{project}/managedZones/{managedZone}/operations", + // "response": { + // "$ref": "ManagedZoneOperationsListResponse" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/cloud-platform.read-only", + // "https://www.googleapis.com/auth/ndev.clouddns.readonly", + // "https://www.googleapis.com/auth/ndev.clouddns.readwrite" + // ] + // } + +} + +// Pages invokes f for each page of results. +// A non-nil error returned from f will halt the iteration. +// The provided context supersedes any context provided to the Context method. 
+func (c *ManagedZoneOperationsListCall) Pages(ctx context.Context, f func(*ManagedZoneOperationsListResponse) error) error { + c.ctx_ = ctx + defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point + for { + x, err := c.Do() + if err != nil { + return err + } + if err := f(x); err != nil { + return err + } + if x.NextPageToken == "" { + return nil + } + c.PageToken(x.NextPageToken) + } +} + +// method id "dns.managedZones.create": + +type ManagedZonesCreateCall struct { + s *Service + project string + managedzone *ManagedZone + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// Create: Creates a new ManagedZone. +// +// - project: Identifies the project addressed by this request. +func (r *ManagedZonesService) Create(project string, managedzone *ManagedZone) *ManagedZonesCreateCall { + c := &ManagedZonesCreateCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.managedzone = managedzone + return c +} + +// ClientOperationId sets the optional parameter "clientOperationId": +// For mutating operation requests only. An optional identifier +// specified by the client. Must be unique for operation resources in +// the Operations collection. +func (c *ManagedZonesCreateCall) ClientOperationId(clientOperationId string) *ManagedZonesCreateCall { + c.urlParams_.Set("clientOperationId", clientOperationId) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *ManagedZonesCreateCall) Fields(s ...googleapi.Field) *ManagedZonesCreateCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *ManagedZonesCreateCall) Context(ctx context.Context) *ManagedZonesCreateCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *ManagedZonesCreateCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *ManagedZonesCreateCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.managedzone) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "dns/v1/projects/{project}/managedZones") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("POST", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "dns.managedZones.create" call. +// Exactly one of *ManagedZone or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *ManagedZone.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. 
Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *ManagedZonesCreateCall) Do(opts ...googleapi.CallOption) (*ManagedZone, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &ManagedZone{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Creates a new ManagedZone.", + // "flatPath": "dns/v1/projects/{project}/managedZones", + // "httpMethod": "POST", + // "id": "dns.managedZones.create", + // "parameterOrder": [ + // "project" + // ], + // "parameters": { + // "clientOperationId": { + // "description": "For mutating operation requests only. An optional identifier specified by the client. Must be unique for operation resources in the Operations collection.", + // "location": "query", + // "type": "string" + // }, + // "project": { + // "description": "Identifies the project addressed by this request.", + // "location": "path", + // "required": true, + // "type": "string" + // } + // }, + // "path": "dns/v1/projects/{project}/managedZones", + // "request": { + // "$ref": "ManagedZone" + // }, + // "response": { + // "$ref": "ManagedZone" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/ndev.clouddns.readwrite" + // ] + // } + +} + +// method id "dns.managedZones.delete": + +type ManagedZonesDeleteCall struct { + s *Service + project string + managedZone string + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// Delete: Deletes a previously created ManagedZone. +// +// - managedZone: Identifies the managed zone addressed by this request. +// Can be the managed zone name or ID. +// - project: Identifies the project addressed by this request. +func (r *ManagedZonesService) Delete(project string, managedZone string) *ManagedZonesDeleteCall { + c := &ManagedZonesDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.managedZone = managedZone + return c +} + +// ClientOperationId sets the optional parameter "clientOperationId": +// For mutating operation requests only. An optional identifier +// specified by the client. Must be unique for operation resources in +// the Operations collection. +func (c *ManagedZonesDeleteCall) ClientOperationId(clientOperationId string) *ManagedZonesDeleteCall { + c.urlParams_.Set("clientOperationId", clientOperationId) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *ManagedZonesDeleteCall) Fields(s ...googleapi.Field) *ManagedZonesDeleteCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. 
+func (c *ManagedZonesDeleteCall) Context(ctx context.Context) *ManagedZonesDeleteCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *ManagedZonesDeleteCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *ManagedZonesDeleteCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "dns/v1/projects/{project}/managedZones/{managedZone}") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("DELETE", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + "managedZone": c.managedZone, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "dns.managedZones.delete" call. +func (c *ManagedZonesDeleteCall) Do(opts ...googleapi.CallOption) error { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if err != nil { + return err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return err + } + return nil + // { + // "description": "Deletes a previously created ManagedZone.", + // "flatPath": "dns/v1/projects/{project}/managedZones/{managedZone}", + // "httpMethod": "DELETE", + // "id": "dns.managedZones.delete", + // "parameterOrder": [ + // "project", + // "managedZone" + // ], + // "parameters": { + // "clientOperationId": { + // "description": "For mutating operation requests only. An optional identifier specified by the client. Must be unique for operation resources in the Operations collection.", + // "location": "query", + // "type": "string" + // }, + // "managedZone": { + // "description": "Identifies the managed zone addressed by this request. Can be the managed zone name or ID.", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "project": { + // "description": "Identifies the project addressed by this request.", + // "location": "path", + // "required": true, + // "type": "string" + // } + // }, + // "path": "dns/v1/projects/{project}/managedZones/{managedZone}", + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/ndev.clouddns.readwrite" + // ] + // } + +} + +// method id "dns.managedZones.get": + +type ManagedZonesGetCall struct { + s *Service + project string + managedZone string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header +} + +// Get: Fetches the representation of an existing ManagedZone. +// +// - managedZone: Identifies the managed zone addressed by this request. +// Can be the managed zone name or ID. +// - project: Identifies the project addressed by this request. 
+func (r *ManagedZonesService) Get(project string, managedZone string) *ManagedZonesGetCall { + c := &ManagedZonesGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.managedZone = managedZone + return c +} + +// ClientOperationId sets the optional parameter "clientOperationId": +// For mutating operation requests only. An optional identifier +// specified by the client. Must be unique for operation resources in +// the Operations collection. +func (c *ManagedZonesGetCall) ClientOperationId(clientOperationId string) *ManagedZonesGetCall { + c.urlParams_.Set("clientOperationId", clientOperationId) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *ManagedZonesGetCall) Fields(s ...googleapi.Field) *ManagedZonesGetCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. This is useful for +// getting updates only after the object has changed since the last +// request. Use googleapi.IsNotModified to check whether the response +// error from Do is the result of In-None-Match. +func (c *ManagedZonesGetCall) IfNoneMatch(entityTag string) *ManagedZonesGetCall { + c.ifNoneMatch_ = entityTag + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *ManagedZonesGetCall) Context(ctx context.Context) *ManagedZonesGetCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *ManagedZonesGetCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *ManagedZonesGetCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) + } + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "dns/v1/projects/{project}/managedZones/{managedZone}") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("GET", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + "managedZone": c.managedZone, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "dns.managedZones.get" call. +// Exactly one of *ManagedZone or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *ManagedZone.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *ManagedZonesGetCall) Do(opts ...googleapi.CallOption) (*ManagedZone, error) { + gensupport.SetOptions(c.urlParams_, opts...) 
+ res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &ManagedZone{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Fetches the representation of an existing ManagedZone.", + // "flatPath": "dns/v1/projects/{project}/managedZones/{managedZone}", + // "httpMethod": "GET", + // "id": "dns.managedZones.get", + // "parameterOrder": [ + // "project", + // "managedZone" + // ], + // "parameters": { + // "clientOperationId": { + // "description": "For mutating operation requests only. An optional identifier specified by the client. Must be unique for operation resources in the Operations collection.", + // "location": "query", + // "type": "string" + // }, + // "managedZone": { + // "description": "Identifies the managed zone addressed by this request. Can be the managed zone name or ID.", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "project": { + // "description": "Identifies the project addressed by this request.", + // "location": "path", + // "required": true, + // "type": "string" + // } + // }, + // "path": "dns/v1/projects/{project}/managedZones/{managedZone}", + // "response": { + // "$ref": "ManagedZone" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/cloud-platform.read-only", + // "https://www.googleapis.com/auth/ndev.clouddns.readonly", + // "https://www.googleapis.com/auth/ndev.clouddns.readwrite" + // ] + // } + +} + +// method id "dns.managedZones.getIamPolicy": + +type ManagedZonesGetIamPolicyCall struct { + s *Service + resource string + googleiamv1getiampolicyrequest *GoogleIamV1GetIamPolicyRequest + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// GetIamPolicy: Gets the access control policy for a resource. Returns +// an empty policy if the resource exists and does not have a policy +// set. +// +// - resource: REQUIRED: The resource for which the policy is being +// requested. See Resource names +// (https://cloud.google.com/apis/design/resource_names) for the +// appropriate value for this field. +func (r *ManagedZonesService) GetIamPolicy(resource string, googleiamv1getiampolicyrequest *GoogleIamV1GetIamPolicyRequest) *ManagedZonesGetIamPolicyCall { + c := &ManagedZonesGetIamPolicyCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.resource = resource + c.googleiamv1getiampolicyrequest = googleiamv1getiampolicyrequest + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *ManagedZonesGetIamPolicyCall) Fields(s ...googleapi.Field) *ManagedZonesGetIamPolicyCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. 
+func (c *ManagedZonesGetIamPolicyCall) Context(ctx context.Context) *ManagedZonesGetIamPolicyCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *ManagedZonesGetIamPolicyCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *ManagedZonesGetIamPolicyCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.googleiamv1getiampolicyrequest) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "dns/v1/{+resource}:getIamPolicy") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("POST", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "resource": c.resource, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "dns.managedZones.getIamPolicy" call. +// Exactly one of *GoogleIamV1Policy or error will be non-nil. Any +// non-2xx status code is an error. Response headers are in either +// *GoogleIamV1Policy.ServerResponse.Header or (if a response was +// returned at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *ManagedZonesGetIamPolicyCall) Do(opts ...googleapi.CallOption) (*GoogleIamV1Policy, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &GoogleIamV1Policy{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Gets the access control policy for a resource. Returns an empty policy if the resource exists and does not have a policy set.", + // "flatPath": "dns/v1/projects/{projectsId}/managedZones/{managedZonesId}:getIamPolicy", + // "httpMethod": "POST", + // "id": "dns.managedZones.getIamPolicy", + // "parameterOrder": [ + // "resource" + // ], + // "parameters": { + // "resource": { + // "description": "REQUIRED: The resource for which the policy is being requested. 
See [Resource names](https://cloud.google.com/apis/design/resource_names) for the appropriate value for this field.", + // "location": "path", + // "pattern": "^projects/[^/]+/managedZones/[^/]+$", + // "required": true, + // "type": "string" + // } + // }, + // "path": "dns/v1/{+resource}:getIamPolicy", + // "request": { + // "$ref": "GoogleIamV1GetIamPolicyRequest" + // }, + // "response": { + // "$ref": "GoogleIamV1Policy" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/cloud-platform.read-only", + // "https://www.googleapis.com/auth/ndev.clouddns.readonly", + // "https://www.googleapis.com/auth/ndev.clouddns.readwrite" + // ] + // } + +} + +// method id "dns.managedZones.list": + +type ManagedZonesListCall struct { + s *Service + project string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header +} + +// List: Enumerates ManagedZones that have been created but not yet +// deleted. +// +// - project: Identifies the project addressed by this request. +func (r *ManagedZonesService) List(project string) *ManagedZonesListCall { + c := &ManagedZonesListCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + return c +} + +// DnsName sets the optional parameter "dnsName": Restricts the list to +// return only zones with this domain name. +func (c *ManagedZonesListCall) DnsName(dnsName string) *ManagedZonesListCall { + c.urlParams_.Set("dnsName", dnsName) + return c +} + +// MaxResults sets the optional parameter "maxResults": Maximum number +// of results to be returned. If unspecified, the server decides how +// many results to return. +func (c *ManagedZonesListCall) MaxResults(maxResults int64) *ManagedZonesListCall { + c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) + return c +} + +// PageToken sets the optional parameter "pageToken": A tag returned by +// a previous list request that was truncated. Use this parameter to +// continue a previous list request. +func (c *ManagedZonesListCall) PageToken(pageToken string) *ManagedZonesListCall { + c.urlParams_.Set("pageToken", pageToken) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *ManagedZonesListCall) Fields(s ...googleapi.Field) *ManagedZonesListCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. This is useful for +// getting updates only after the object has changed since the last +// request. Use googleapi.IsNotModified to check whether the response +// error from Do is the result of In-None-Match. +func (c *ManagedZonesListCall) IfNoneMatch(entityTag string) *ManagedZonesListCall { + c.ifNoneMatch_ = entityTag + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *ManagedZonesListCall) Context(ctx context.Context) *ManagedZonesListCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. 
+func (c *ManagedZonesListCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *ManagedZonesListCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) + } + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "dns/v1/projects/{project}/managedZones") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("GET", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "dns.managedZones.list" call. +// Exactly one of *ManagedZonesListResponse or error will be non-nil. +// Any non-2xx status code is an error. Response headers are in either +// *ManagedZonesListResponse.ServerResponse.Header or (if a response was +// returned at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *ManagedZonesListCall) Do(opts ...googleapi.CallOption) (*ManagedZonesListResponse, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &ManagedZonesListResponse{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Enumerates ManagedZones that have been created but not yet deleted.", + // "flatPath": "dns/v1/projects/{project}/managedZones", + // "httpMethod": "GET", + // "id": "dns.managedZones.list", + // "parameterOrder": [ + // "project" + // ], + // "parameters": { + // "dnsName": { + // "description": "Restricts the list to return only zones with this domain name.", + // "location": "query", + // "type": "string" + // }, + // "maxResults": { + // "description": "Optional. Maximum number of results to be returned. If unspecified, the server decides how many results to return.", + // "format": "int32", + // "location": "query", + // "type": "integer" + // }, + // "pageToken": { + // "description": "Optional. A tag returned by a previous list request that was truncated. 
Use this parameter to continue a previous list request.", + // "location": "query", + // "type": "string" + // }, + // "project": { + // "description": "Identifies the project addressed by this request.", + // "location": "path", + // "required": true, + // "type": "string" + // } + // }, + // "path": "dns/v1/projects/{project}/managedZones", + // "response": { + // "$ref": "ManagedZonesListResponse" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/cloud-platform.read-only", + // "https://www.googleapis.com/auth/ndev.clouddns.readonly", + // "https://www.googleapis.com/auth/ndev.clouddns.readwrite" + // ] + // } + +} + +// Pages invokes f for each page of results. +// A non-nil error returned from f will halt the iteration. +// The provided context supersedes any context provided to the Context method. +func (c *ManagedZonesListCall) Pages(ctx context.Context, f func(*ManagedZonesListResponse) error) error { + c.ctx_ = ctx + defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point + for { + x, err := c.Do() + if err != nil { + return err + } + if err := f(x); err != nil { + return err + } + if x.NextPageToken == "" { + return nil + } + c.PageToken(x.NextPageToken) + } +} + +// method id "dns.managedZones.patch": + +type ManagedZonesPatchCall struct { + s *Service + project string + managedZone string + managedzone *ManagedZone + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// Patch: Applies a partial update to an existing ManagedZone. +// +// - managedZone: Identifies the managed zone addressed by this request. +// Can be the managed zone name or ID. +// - project: Identifies the project addressed by this request. +func (r *ManagedZonesService) Patch(project string, managedZone string, managedzone *ManagedZone) *ManagedZonesPatchCall { + c := &ManagedZonesPatchCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.managedZone = managedZone + c.managedzone = managedzone + return c +} + +// ClientOperationId sets the optional parameter "clientOperationId": +// For mutating operation requests only. An optional identifier +// specified by the client. Must be unique for operation resources in +// the Operations collection. +func (c *ManagedZonesPatchCall) ClientOperationId(clientOperationId string) *ManagedZonesPatchCall { + c.urlParams_.Set("clientOperationId", clientOperationId) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *ManagedZonesPatchCall) Fields(s ...googleapi.Field) *ManagedZonesPatchCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *ManagedZonesPatchCall) Context(ctx context.Context) *ManagedZonesPatchCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. 
+func (c *ManagedZonesPatchCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *ManagedZonesPatchCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.managedzone) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "dns/v1/projects/{project}/managedZones/{managedZone}") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("PATCH", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + "managedZone": c.managedZone, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "dns.managedZones.patch" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *ManagedZonesPatchCall) Do(opts ...googleapi.CallOption) (*Operation, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &Operation{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Applies a partial update to an existing ManagedZone.", + // "flatPath": "dns/v1/projects/{project}/managedZones/{managedZone}", + // "httpMethod": "PATCH", + // "id": "dns.managedZones.patch", + // "parameterOrder": [ + // "project", + // "managedZone" + // ], + // "parameters": { + // "clientOperationId": { + // "description": "For mutating operation requests only. An optional identifier specified by the client. Must be unique for operation resources in the Operations collection.", + // "location": "query", + // "type": "string" + // }, + // "managedZone": { + // "description": "Identifies the managed zone addressed by this request. 
Can be the managed zone name or ID.", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "project": { + // "description": "Identifies the project addressed by this request.", + // "location": "path", + // "required": true, + // "type": "string" + // } + // }, + // "path": "dns/v1/projects/{project}/managedZones/{managedZone}", + // "request": { + // "$ref": "ManagedZone" + // }, + // "response": { + // "$ref": "Operation" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/ndev.clouddns.readwrite" + // ] + // } + +} + +// method id "dns.managedZones.setIamPolicy": + +type ManagedZonesSetIamPolicyCall struct { + s *Service + resource string + googleiamv1setiampolicyrequest *GoogleIamV1SetIamPolicyRequest + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// SetIamPolicy: Sets the access control policy on the specified +// resource. Replaces any existing policy. Can return `NOT_FOUND`, +// `INVALID_ARGUMENT`, and `PERMISSION_DENIED` errors. +// +// - resource: REQUIRED: The resource for which the policy is being +// specified. See Resource names +// (https://cloud.google.com/apis/design/resource_names) for the +// appropriate value for this field. +func (r *ManagedZonesService) SetIamPolicy(resource string, googleiamv1setiampolicyrequest *GoogleIamV1SetIamPolicyRequest) *ManagedZonesSetIamPolicyCall { + c := &ManagedZonesSetIamPolicyCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.resource = resource + c.googleiamv1setiampolicyrequest = googleiamv1setiampolicyrequest + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *ManagedZonesSetIamPolicyCall) Fields(s ...googleapi.Field) *ManagedZonesSetIamPolicyCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *ManagedZonesSetIamPolicyCall) Context(ctx context.Context) *ManagedZonesSetIamPolicyCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *ManagedZonesSetIamPolicyCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *ManagedZonesSetIamPolicyCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.googleiamv1setiampolicyrequest) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "dns/v1/{+resource}:setIamPolicy") + urls += "?" 
+ c.urlParams_.Encode() + req, err := http.NewRequest("POST", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "resource": c.resource, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "dns.managedZones.setIamPolicy" call. +// Exactly one of *GoogleIamV1Policy or error will be non-nil. Any +// non-2xx status code is an error. Response headers are in either +// *GoogleIamV1Policy.ServerResponse.Header or (if a response was +// returned at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *ManagedZonesSetIamPolicyCall) Do(opts ...googleapi.CallOption) (*GoogleIamV1Policy, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &GoogleIamV1Policy{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Sets the access control policy on the specified resource. Replaces any existing policy. Can return `NOT_FOUND`, `INVALID_ARGUMENT`, and `PERMISSION_DENIED` errors.", + // "flatPath": "dns/v1/projects/{projectsId}/managedZones/{managedZonesId}:setIamPolicy", + // "httpMethod": "POST", + // "id": "dns.managedZones.setIamPolicy", + // "parameterOrder": [ + // "resource" + // ], + // "parameters": { + // "resource": { + // "description": "REQUIRED: The resource for which the policy is being specified. See [Resource names](https://cloud.google.com/apis/design/resource_names) for the appropriate value for this field.", + // "location": "path", + // "pattern": "^projects/[^/]+/managedZones/[^/]+$", + // "required": true, + // "type": "string" + // } + // }, + // "path": "dns/v1/{+resource}:setIamPolicy", + // "request": { + // "$ref": "GoogleIamV1SetIamPolicyRequest" + // }, + // "response": { + // "$ref": "GoogleIamV1Policy" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/ndev.clouddns.readwrite" + // ] + // } + } -// method id "dns.changes.create": +// method id "dns.managedZones.testIamPermissions": -type ChangesCreateCall struct { - s *Service - project string - managedZone string - change *Change - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type ManagedZonesTestIamPermissionsCall struct { + s *Service + resource string + googleiamv1testiampermissionsrequest *GoogleIamV1TestIamPermissionsRequest + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// Create: Atomically updates the ResourceRecordSet collection. +// TestIamPermissions: Returns permissions that a caller has on the +// specified resource. If the resource does not exist, this will return +// an empty set of permissions, not a `NOT_FOUND` error. Note: This +// operation is designed to be used for building permission-aware UIs +// and command-line tools, not for authorization checking. 
This +// operation may "fail open" without warning. // -// - managedZone: Identifies the managed zone addressed by this request. -// Can be the managed zone name or ID. -// - project: Identifies the project addressed by this request. -func (r *ChangesService) Create(project string, managedZone string, change *Change) *ChangesCreateCall { - c := &ChangesCreateCall{s: r.s, urlParams_: make(gensupport.URLParams)} - c.project = project - c.managedZone = managedZone - c.change = change - return c -} - -// ClientOperationId sets the optional parameter "clientOperationId": -// For mutating operation requests only. An optional identifier -// specified by the client. Must be unique for operation resources in -// the Operations collection. -func (c *ChangesCreateCall) ClientOperationId(clientOperationId string) *ChangesCreateCall { - c.urlParams_.Set("clientOperationId", clientOperationId) +// - resource: REQUIRED: The resource for which the policy detail is +// being requested. See Resource names +// (https://cloud.google.com/apis/design/resource_names) for the +// appropriate value for this field. +func (r *ManagedZonesService) TestIamPermissions(resource string, googleiamv1testiampermissionsrequest *GoogleIamV1TestIamPermissionsRequest) *ManagedZonesTestIamPermissionsCall { + c := &ManagedZonesTestIamPermissionsCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.resource = resource + c.googleiamv1testiampermissionsrequest = googleiamv1testiampermissionsrequest return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *ChangesCreateCall) Fields(s ...googleapi.Field) *ChangesCreateCall { +func (c *ManagedZonesTestIamPermissionsCall) Fields(s ...googleapi.Field) *ManagedZonesTestIamPermissionsCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -1883,36 +5615,36 @@ func (c *ChangesCreateCall) Fields(s ...googleapi.Field) *ChangesCreateCall { // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *ChangesCreateCall) Context(ctx context.Context) *ChangesCreateCall { +func (c *ManagedZonesTestIamPermissionsCall) Context(ctx context.Context) *ManagedZonesTestIamPermissionsCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. 
-func (c *ChangesCreateCall) Header() http.Header { +func (c *ManagedZonesTestIamPermissionsCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *ChangesCreateCall) doRequest(alt string) (*http.Response, error) { +func (c *ManagedZonesTestIamPermissionsCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20211212") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.change) + body, err := googleapi.WithoutDataWrapper.JSONReader(c.googleiamv1testiampermissionsrequest) if err != nil { return nil, err } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "dns/v1/projects/{project}/managedZones/{managedZone}/changes") + urls := googleapi.ResolveRelative(c.s.BasePath, "dns/v1/{+resource}:testIamPermissions") urls += "?" + c.urlParams_.Encode() req, err := http.NewRequest("POST", urls, body) if err != nil { @@ -1920,20 +5652,20 @@ func (c *ChangesCreateCall) doRequest(alt string) (*http.Response, error) { } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "managedZone": c.managedZone, + "resource": c.resource, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "dns.changes.create" call. -// Exactly one of *Change or error will be non-nil. Any non-2xx status -// code is an error. Response headers are in either -// *Change.ServerResponse.Header or (if a response was returned at all) -// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to -// check whether the returned error was because http.StatusNotModified -// was returned. -func (c *ChangesCreateCall) Do(opts ...googleapi.CallOption) (*Change, error) { +// Do executes the "dns.managedZones.testIamPermissions" call. +// Exactly one of *GoogleIamV1TestIamPermissionsResponse or error will +// be non-nil. Any non-2xx status code is an error. Response headers are +// in either +// *GoogleIamV1TestIamPermissionsResponse.ServerResponse.Header or (if a +// response was returned at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *ManagedZonesTestIamPermissionsCall) Do(opts ...googleapi.CallOption) (*GoogleIamV1TestIamPermissionsResponse, error) { gensupport.SetOptions(c.urlParams_, opts...) 
res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -1952,7 +5684,7 @@ func (c *ChangesCreateCall) Do(opts ...googleapi.CallOption) (*Change, error) { if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &Change{ + ret := &GoogleIamV1TestIamPermissionsResponse{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -1964,73 +5696,61 @@ func (c *ChangesCreateCall) Do(opts ...googleapi.CallOption) (*Change, error) { } return ret, nil // { - // "description": "Atomically updates the ResourceRecordSet collection.", - // "flatPath": "dns/v1/projects/{project}/managedZones/{managedZone}/changes", + // "description": "Returns permissions that a caller has on the specified resource. If the resource does not exist, this will return an empty set of permissions, not a `NOT_FOUND` error. Note: This operation is designed to be used for building permission-aware UIs and command-line tools, not for authorization checking. This operation may \"fail open\" without warning.", + // "flatPath": "dns/v1/projects/{projectsId}/managedZones/{managedZonesId}:testIamPermissions", // "httpMethod": "POST", - // "id": "dns.changes.create", + // "id": "dns.managedZones.testIamPermissions", // "parameterOrder": [ - // "project", - // "managedZone" + // "resource" // ], // "parameters": { - // "clientOperationId": { - // "description": "For mutating operation requests only. An optional identifier specified by the client. Must be unique for operation resources in the Operations collection.", - // "location": "query", - // "type": "string" - // }, - // "managedZone": { - // "description": "Identifies the managed zone addressed by this request. Can be the managed zone name or ID.", - // "location": "path", - // "required": true, - // "type": "string" - // }, - // "project": { - // "description": "Identifies the project addressed by this request.", + // "resource": { + // "description": "REQUIRED: The resource for which the policy detail is being requested. See [Resource names](https://cloud.google.com/apis/design/resource_names) for the appropriate value for this field.", // "location": "path", + // "pattern": "^projects/[^/]+/managedZones/[^/]+$", // "required": true, // "type": "string" // } // }, - // "path": "dns/v1/projects/{project}/managedZones/{managedZone}/changes", + // "path": "dns/v1/{+resource}:testIamPermissions", // "request": { - // "$ref": "Change" + // "$ref": "GoogleIamV1TestIamPermissionsRequest" // }, // "response": { - // "$ref": "Change" + // "$ref": "GoogleIamV1TestIamPermissionsResponse" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/cloud-platform.read-only", + // "https://www.googleapis.com/auth/ndev.clouddns.readonly", // "https://www.googleapis.com/auth/ndev.clouddns.readwrite" // ] // } } -// method id "dns.changes.get": +// method id "dns.managedZones.update": -type ChangesGetCall struct { - s *Service - project string - managedZone string - changeId string - urlParams_ gensupport.URLParams - ifNoneMatch_ string - ctx_ context.Context - header_ http.Header +type ManagedZonesUpdateCall struct { + s *Service + project string + managedZone string + managedzone *ManagedZone + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// Get: Fetches the representation of an existing Change. +// Update: Updates an existing ManagedZone. 
// -// - changeId: The identifier of the requested change, from a previous -// ResourceRecordSetsChangeResponse. // - managedZone: Identifies the managed zone addressed by this request. // Can be the managed zone name or ID. // - project: Identifies the project addressed by this request. -func (r *ChangesService) Get(project string, managedZone string, changeId string) *ChangesGetCall { - c := &ChangesGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} +func (r *ManagedZonesService) Update(project string, managedZone string, managedzone *ManagedZone) *ManagedZonesUpdateCall { + c := &ManagedZonesUpdateCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project c.managedZone = managedZone - c.changeId = changeId + c.managedzone = managedzone return c } @@ -2038,7 +5758,7 @@ func (r *ChangesService) Get(project string, managedZone string, changeId string // For mutating operation requests only. An optional identifier // specified by the client. Must be unique for operation resources in // the Operations collection. -func (c *ChangesGetCall) ClientOperationId(clientOperationId string) *ChangesGetCall { +func (c *ManagedZonesUpdateCall) ClientOperationId(clientOperationId string) *ManagedZonesUpdateCall { c.urlParams_.Set("clientOperationId", clientOperationId) return c } @@ -2046,54 +5766,46 @@ func (c *ChangesGetCall) ClientOperationId(clientOperationId string) *ChangesGet // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *ChangesGetCall) Fields(s ...googleapi.Field) *ChangesGetCall { +func (c *ManagedZonesUpdateCall) Fields(s ...googleapi.Field) *ManagedZonesUpdateCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } -// IfNoneMatch sets the optional parameter which makes the operation -// fail if the object's ETag matches the given value. This is useful for -// getting updates only after the object has changed since the last -// request. Use googleapi.IsNotModified to check whether the response -// error from Do is the result of In-None-Match. -func (c *ChangesGetCall) IfNoneMatch(entityTag string) *ChangesGetCall { - c.ifNoneMatch_ = entityTag - return c -} - // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *ChangesGetCall) Context(ctx context.Context) *ChangesGetCall { +func (c *ManagedZonesUpdateCall) Context(ctx context.Context) *ManagedZonesUpdateCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. 
-func (c *ChangesGetCall) Header() http.Header { +func (c *ManagedZonesUpdateCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *ChangesGetCall) doRequest(alt string) (*http.Response, error) { +func (c *ManagedZonesUpdateCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20211212") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) - if c.ifNoneMatch_ != "" { - reqHeaders.Set("If-None-Match", c.ifNoneMatch_) - } var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.managedzone) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "dns/v1/projects/{project}/managedZones/{managedZone}/changes/{changeId}") + urls := googleapi.ResolveRelative(c.s.BasePath, "dns/v1/projects/{project}/managedZones/{managedZone}") urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("GET", urls, body) + req, err := http.NewRequest("PUT", urls, body) if err != nil { return nil, err } @@ -2101,19 +5813,18 @@ func (c *ChangesGetCall) doRequest(alt string) (*http.Response, error) { googleapi.Expand(req.URL, map[string]string{ "project": c.project, "managedZone": c.managedZone, - "changeId": c.changeId, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "dns.changes.get" call. -// Exactly one of *Change or error will be non-nil. Any non-2xx status -// code is an error. Response headers are in either -// *Change.ServerResponse.Header or (if a response was returned at all) -// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to -// check whether the returned error was because http.StatusNotModified -// was returned. -func (c *ChangesGetCall) Do(opts ...googleapi.CallOption) (*Change, error) { +// Do executes the "dns.managedZones.update" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *ManagedZonesUpdateCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) 
res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -2132,7 +5843,7 @@ func (c *ChangesGetCall) Do(opts ...googleapi.CallOption) (*Change, error) { if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &Change{ + ret := &Operation{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -2144,22 +5855,15 @@ func (c *ChangesGetCall) Do(opts ...googleapi.CallOption) (*Change, error) { } return ret, nil // { - // "description": "Fetches the representation of an existing Change.", - // "flatPath": "dns/v1/projects/{project}/managedZones/{managedZone}/changes/{changeId}", - // "httpMethod": "GET", - // "id": "dns.changes.get", + // "description": "Updates an existing ManagedZone.", + // "flatPath": "dns/v1/projects/{project}/managedZones/{managedZone}", + // "httpMethod": "PUT", + // "id": "dns.managedZones.update", // "parameterOrder": [ // "project", - // "managedZone", - // "changeId" + // "managedZone" // ], // "parameters": { - // "changeId": { - // "description": "The identifier of the requested change, from a previous ResourceRecordSetsChangeResponse.", - // "location": "path", - // "required": true, - // "type": "string" - // }, // "clientOperationId": { // "description": "For mutating operation requests only. An optional identifier specified by the client. Must be unique for operation resources in the Operations collection.", // "location": "query", @@ -2178,147 +5882,112 @@ func (c *ChangesGetCall) Do(opts ...googleapi.CallOption) (*Change, error) { // "type": "string" // } // }, - // "path": "dns/v1/projects/{project}/managedZones/{managedZone}/changes/{changeId}", + // "path": "dns/v1/projects/{project}/managedZones/{managedZone}", + // "request": { + // "$ref": "ManagedZone" + // }, // "response": { - // "$ref": "Change" + // "$ref": "Operation" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/cloud-platform.read-only", - // "https://www.googleapis.com/auth/ndev.clouddns.readonly", // "https://www.googleapis.com/auth/ndev.clouddns.readwrite" // ] // } } -// method id "dns.changes.list": - -type ChangesListCall struct { - s *Service - project string - managedZone string - urlParams_ gensupport.URLParams - ifNoneMatch_ string - ctx_ context.Context - header_ http.Header -} - -// List: Enumerates Changes to a ResourceRecordSet collection. -// -// - managedZone: Identifies the managed zone addressed by this request. -// Can be the managed zone name or ID. -// - project: Identifies the project addressed by this request. -func (r *ChangesService) List(project string, managedZone string) *ChangesListCall { - c := &ChangesListCall{s: r.s, urlParams_: make(gensupport.URLParams)} - c.project = project - c.managedZone = managedZone - return c -} - -// MaxResults sets the optional parameter "maxResults": Maximum number -// of results to be returned. If unspecified, the server decides how -// many results to return. -func (c *ChangesListCall) MaxResults(maxResults int64) *ChangesListCall { - c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) - return c -} +// method id "dns.policies.create": -// PageToken sets the optional parameter "pageToken": A tag returned by -// a previous list request that was truncated. Use this parameter to -// continue a previous list request. 
-func (c *ChangesListCall) PageToken(pageToken string) *ChangesListCall { - c.urlParams_.Set("pageToken", pageToken) - return c +type PoliciesCreateCall struct { + s *Service + project string + policy *Policy + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// SortBy sets the optional parameter "sortBy": Sorting criterion. The -// only supported value is change sequence. +// Create: Creates a new Policy. // -// Possible values: -// "changeSequence" (default) -func (c *ChangesListCall) SortBy(sortBy string) *ChangesListCall { - c.urlParams_.Set("sortBy", sortBy) +// - project: Identifies the project addressed by this request. +func (r *PoliciesService) Create(project string, policy *Policy) *PoliciesCreateCall { + c := &PoliciesCreateCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.policy = policy return c } -// SortOrder sets the optional parameter "sortOrder": Sorting order -// direction: 'ascending' or 'descending'. -func (c *ChangesListCall) SortOrder(sortOrder string) *ChangesListCall { - c.urlParams_.Set("sortOrder", sortOrder) +// ClientOperationId sets the optional parameter "clientOperationId": +// For mutating operation requests only. An optional identifier +// specified by the client. Must be unique for operation resources in +// the Operations collection. +func (c *PoliciesCreateCall) ClientOperationId(clientOperationId string) *PoliciesCreateCall { + c.urlParams_.Set("clientOperationId", clientOperationId) return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *ChangesListCall) Fields(s ...googleapi.Field) *ChangesListCall { +func (c *PoliciesCreateCall) Fields(s ...googleapi.Field) *PoliciesCreateCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } -// IfNoneMatch sets the optional parameter which makes the operation -// fail if the object's ETag matches the given value. This is useful for -// getting updates only after the object has changed since the last -// request. Use googleapi.IsNotModified to check whether the response -// error from Do is the result of In-None-Match. -func (c *ChangesListCall) IfNoneMatch(entityTag string) *ChangesListCall { - c.ifNoneMatch_ = entityTag - return c -} - // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *ChangesListCall) Context(ctx context.Context) *ChangesListCall { +func (c *PoliciesCreateCall) Context(ctx context.Context) *PoliciesCreateCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. 
-func (c *ChangesListCall) Header() http.Header { +func (c *PoliciesCreateCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *ChangesListCall) doRequest(alt string) (*http.Response, error) { +func (c *PoliciesCreateCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20211212") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) - if c.ifNoneMatch_ != "" { - reqHeaders.Set("If-None-Match", c.ifNoneMatch_) - } var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.policy) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "dns/v1/projects/{project}/managedZones/{managedZone}/changes") + urls := googleapi.ResolveRelative(c.s.BasePath, "dns/v1/projects/{project}/policies") urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("GET", urls, body) + req, err := http.NewRequest("POST", urls, body) if err != nil { return nil, err } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "managedZone": c.managedZone, + "project": c.project, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "dns.changes.list" call. -// Exactly one of *ChangesListResponse or error will be non-nil. Any -// non-2xx status code is an error. Response headers are in either -// *ChangesListResponse.ServerResponse.Header or (if a response was -// returned at all) in error.(*googleapi.Error).Header. Use -// googleapi.IsNotModified to check whether the returned error was -// because http.StatusNotModified was returned. -func (c *ChangesListCall) Do(opts ...googleapi.CallOption) (*ChangesListResponse, error) { +// Do executes the "dns.policies.create" call. +// Exactly one of *Policy or error will be non-nil. Any non-2xx status +// code is an error. Response headers are in either +// *Policy.ServerResponse.Header or (if a response was returned at all) +// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to +// check whether the returned error was because http.StatusNotModified +// was returned. +func (c *PoliciesCreateCall) Do(opts ...googleapi.CallOption) (*Policy, error) { gensupport.SetOptions(c.urlParams_, opts...) 
res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -2337,7 +6006,7 @@ func (c *ChangesListCall) Do(opts ...googleapi.CallOption) (*ChangesListResponse if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &ChangesListResponse{ + ret := &Policy{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -2349,29 +6018,16 @@ func (c *ChangesListCall) Do(opts ...googleapi.CallOption) (*ChangesListResponse } return ret, nil // { - // "description": "Enumerates Changes to a ResourceRecordSet collection.", - // "flatPath": "dns/v1/projects/{project}/managedZones/{managedZone}/changes", - // "httpMethod": "GET", - // "id": "dns.changes.list", + // "description": "Creates a new Policy.", + // "flatPath": "dns/v1/projects/{project}/policies", + // "httpMethod": "POST", + // "id": "dns.policies.create", // "parameterOrder": [ - // "project", - // "managedZone" + // "project" // ], // "parameters": { - // "managedZone": { - // "description": "Identifies the managed zone addressed by this request. Can be the managed zone name or ID.", - // "location": "path", - // "required": true, - // "type": "string" - // }, - // "maxResults": { - // "description": "Optional. Maximum number of results to be returned. If unspecified, the server decides how many results to return.", - // "format": "int32", - // "location": "query", - // "type": "integer" - // }, - // "pageToken": { - // "description": "Optional. A tag returned by a previous list request that was truncated. Use this parameter to continue a previous list request.", + // "clientOperationId": { + // "description": "For mutating operation requests only. An optional identifier specified by the client. Must be unique for operation resources in the Operations collection.", // "location": "query", // "type": "string" // }, @@ -2380,84 +6036,44 @@ func (c *ChangesListCall) Do(opts ...googleapi.CallOption) (*ChangesListResponse // "location": "path", // "required": true, // "type": "string" - // }, - // "sortBy": { - // "default": "changeSequence", - // "description": "Sorting criterion. The only supported value is change sequence.", - // "enum": [ - // "changeSequence" - // ], - // "enumDescriptions": [ - // "" - // ], - // "location": "query", - // "type": "string" - // }, - // "sortOrder": { - // "description": "Sorting order direction: 'ascending' or 'descending'.", - // "location": "query", - // "type": "string" // } // }, - // "path": "dns/v1/projects/{project}/managedZones/{managedZone}/changes", + // "path": "dns/v1/projects/{project}/policies", + // "request": { + // "$ref": "Policy" + // }, // "response": { - // "$ref": "ChangesListResponse" + // "$ref": "Policy" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/cloud-platform.read-only", - // "https://www.googleapis.com/auth/ndev.clouddns.readonly", // "https://www.googleapis.com/auth/ndev.clouddns.readwrite" // ] // } } -// Pages invokes f for each page of results. -// A non-nil error returned from f will halt the iteration. -// The provided context supersedes any context provided to the Context method. 
[Flattened diff of the generated Google Cloud DNS v1 Go client: these hunks remove the ChangesListCall.Pages, DnsKeys, ManagedZoneOperations, and ManagedZones call wrappers shown at this position and add the corresponding Policies (dns.policies.delete/get/list/patch/update), Projects (dns.projects.get), and ResourceRecordSets (dns.resourceRecordSets.create/delete/get) call wrappers; the x-goog-api-client request header switches from the hard-coded "gdcl/20211212" suffix to "gdcl/"+internal.Version.]
-func (c *ManagedZonesPatchCall) ClientOperationId(clientOperationId string) *ManagedZonesPatchCall { +func (c *ResourceRecordSetsGetCall) ClientOperationId(clientOperationId string) *ResourceRecordSetsGetCall { c.urlParams_.Set("clientOperationId", clientOperationId) return c } @@ -3979,46 +7442,54 @@ func (c *ManagedZonesPatchCall) ClientOperationId(clientOperationId string) *Man // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *ManagedZonesPatchCall) Fields(s ...googleapi.Field) *ManagedZonesPatchCall { +func (c *ResourceRecordSetsGetCall) Fields(s ...googleapi.Field) *ResourceRecordSetsGetCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. This is useful for +// getting updates only after the object has changed since the last +// request. Use googleapi.IsNotModified to check whether the response +// error from Do is the result of In-None-Match. +func (c *ResourceRecordSetsGetCall) IfNoneMatch(entityTag string) *ResourceRecordSetsGetCall { + c.ifNoneMatch_ = entityTag + return c +} + // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *ManagedZonesPatchCall) Context(ctx context.Context) *ManagedZonesPatchCall { +func (c *ResourceRecordSetsGetCall) Context(ctx context.Context) *ResourceRecordSetsGetCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *ManagedZonesPatchCall) Header() http.Header { +func (c *ResourceRecordSetsGetCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *ManagedZonesPatchCall) doRequest(alt string) (*http.Response, error) { +func (c *ResourceRecordSetsGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20211212") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) - var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.managedzone) - if err != nil { - return nil, err + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } - reqHeaders.Set("Content-Type", "application/json") + var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "dns/v1/projects/{project}/managedZones/{managedZone}") + urls := googleapi.ResolveRelative(c.s.BasePath, "dns/v1/projects/{project}/managedZones/{managedZone}/rrsets/{name}/{type}") urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("PATCH", urls, body) + req, err := http.NewRequest("GET", urls, body) if err != nil { return nil, err } @@ -4026,18 +7497,20 @@ func (c *ManagedZonesPatchCall) doRequest(alt string) (*http.Response, error) { googleapi.Expand(req.URL, map[string]string{ "project": c.project, "managedZone": c.managedZone, + "name": c.name, + "type": c.type_, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "dns.managedZones.patch" call. 
-// Exactly one of *Operation or error will be non-nil. Any non-2xx -// status code is an error. Response headers are in either -// *Operation.ServerResponse.Header or (if a response was returned at -// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified -// to check whether the returned error was because -// http.StatusNotModified was returned. -func (c *ManagedZonesPatchCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +// Do executes the "dns.resourceRecordSets.get" call. +// Exactly one of *ResourceRecordSet or error will be non-nil. Any +// non-2xx status code is an error. Response headers are in either +// *ResourceRecordSet.ServerResponse.Header or (if a response was +// returned at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *ResourceRecordSetsGetCall) Do(opts ...googleapi.CallOption) (*ResourceRecordSet, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -4056,7 +7529,7 @@ func (c *ManagedZonesPatchCall) Do(opts ...googleapi.CallOption) (*Operation, er if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &Operation{ + ret := &ResourceRecordSet{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -4068,13 +7541,15 @@ func (c *ManagedZonesPatchCall) Do(opts ...googleapi.CallOption) (*Operation, er } return ret, nil // { - // "description": "Applies a partial update to an existing ManagedZone.", - // "flatPath": "dns/v1/projects/{project}/managedZones/{managedZone}", - // "httpMethod": "PATCH", - // "id": "dns.managedZones.patch", + // "description": "Fetches the representation of an existing ResourceRecordSet.", + // "flatPath": "dns/v1/projects/{project}/managedZones/{managedZone}/rrsets/{name}/{type}", + // "httpMethod": "GET", + // "id": "dns.resourceRecordSets.get", // "parameterOrder": [ // "project", - // "managedZone" + // "managedZone", + // "name", + // "type" // ], // "parameters": { // "clientOperationId": { @@ -4088,105 +7563,146 @@ func (c *ManagedZonesPatchCall) Do(opts ...googleapi.CallOption) (*Operation, er // "required": true, // "type": "string" // }, + // "name": { + // "description": "Fully qualified domain name.", + // "location": "path", + // "required": true, + // "type": "string" + // }, // "project": { // "description": "Identifies the project addressed by this request.", // "location": "path", // "required": true, // "type": "string" + // }, + // "type": { + // "description": "RRSet type.", + // "location": "path", + // "required": true, + // "type": "string" // } // }, - // "path": "dns/v1/projects/{project}/managedZones/{managedZone}", - // "request": { - // "$ref": "ManagedZone" - // }, + // "path": "dns/v1/projects/{project}/managedZones/{managedZone}/rrsets/{name}/{type}", // "response": { - // "$ref": "Operation" + // "$ref": "ResourceRecordSet" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/cloud-platform.read-only", + // "https://www.googleapis.com/auth/ndev.clouddns.readonly", // "https://www.googleapis.com/auth/ndev.clouddns.readwrite" // ] // } } -// method id "dns.managedZones.update": +// method id "dns.resourceRecordSets.list": -type ManagedZonesUpdateCall struct { - s *Service - project string - managedZone string - managedzone *ManagedZone - urlParams_ 
gensupport.URLParams - ctx_ context.Context - header_ http.Header +type ResourceRecordSetsListCall struct { + s *Service + project string + managedZone string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header } -// Update: Updates an existing ManagedZone. +// List: Enumerates ResourceRecordSets that you have created but not yet +// deleted. // // - managedZone: Identifies the managed zone addressed by this request. // Can be the managed zone name or ID. // - project: Identifies the project addressed by this request. -func (r *ManagedZonesService) Update(project string, managedZone string, managedzone *ManagedZone) *ManagedZonesUpdateCall { - c := &ManagedZonesUpdateCall{s: r.s, urlParams_: make(gensupport.URLParams)} +func (r *ResourceRecordSetsService) List(project string, managedZone string) *ResourceRecordSetsListCall { + c := &ResourceRecordSetsListCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project c.managedZone = managedZone - c.managedzone = managedzone return c } -// ClientOperationId sets the optional parameter "clientOperationId": -// For mutating operation requests only. An optional identifier -// specified by the client. Must be unique for operation resources in -// the Operations collection. -func (c *ManagedZonesUpdateCall) ClientOperationId(clientOperationId string) *ManagedZonesUpdateCall { - c.urlParams_.Set("clientOperationId", clientOperationId) +// MaxResults sets the optional parameter "maxResults": Maximum number +// of results to be returned. If unspecified, the server decides how +// many results to return. +func (c *ResourceRecordSetsListCall) MaxResults(maxResults int64) *ResourceRecordSetsListCall { + c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) + return c +} + +// Name sets the optional parameter "name": Restricts the list to return +// only records with this fully qualified domain name. +func (c *ResourceRecordSetsListCall) Name(name string) *ResourceRecordSetsListCall { + c.urlParams_.Set("name", name) + return c +} + +// PageToken sets the optional parameter "pageToken": A tag returned by +// a previous list request that was truncated. Use this parameter to +// continue a previous list request. +func (c *ResourceRecordSetsListCall) PageToken(pageToken string) *ResourceRecordSetsListCall { + c.urlParams_.Set("pageToken", pageToken) + return c +} + +// Type sets the optional parameter "type": Restricts the list to return +// only records of this type. If present, the "name" parameter must also +// be present. +func (c *ResourceRecordSetsListCall) Type(type_ string) *ResourceRecordSetsListCall { + c.urlParams_.Set("type", type_) return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *ManagedZonesUpdateCall) Fields(s ...googleapi.Field) *ManagedZonesUpdateCall { +func (c *ResourceRecordSetsListCall) Fields(s ...googleapi.Field) *ResourceRecordSetsListCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. This is useful for +// getting updates only after the object has changed since the last +// request. Use googleapi.IsNotModified to check whether the response +// error from Do is the result of In-None-Match. 
+func (c *ResourceRecordSetsListCall) IfNoneMatch(entityTag string) *ResourceRecordSetsListCall { + c.ifNoneMatch_ = entityTag + return c +} + // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *ManagedZonesUpdateCall) Context(ctx context.Context) *ManagedZonesUpdateCall { +func (c *ResourceRecordSetsListCall) Context(ctx context.Context) *ResourceRecordSetsListCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *ManagedZonesUpdateCall) Header() http.Header { +func (c *ResourceRecordSetsListCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *ManagedZonesUpdateCall) doRequest(alt string) (*http.Response, error) { +func (c *ResourceRecordSetsListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20211212") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) - var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.managedzone) - if err != nil { - return nil, err + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } - reqHeaders.Set("Content-Type", "application/json") + var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "dns/v1/projects/{project}/managedZones/{managedZone}") + urls := googleapi.ResolveRelative(c.s.BasePath, "dns/v1/projects/{project}/managedZones/{managedZone}/rrsets") urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("PUT", urls, body) + req, err := http.NewRequest("GET", urls, body) if err != nil { return nil, err } @@ -4198,14 +7714,14 @@ func (c *ManagedZonesUpdateCall) doRequest(alt string) (*http.Response, error) { return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "dns.managedZones.update" call. -// Exactly one of *Operation or error will be non-nil. Any non-2xx -// status code is an error. Response headers are in either -// *Operation.ServerResponse.Header or (if a response was returned at -// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified -// to check whether the returned error was because -// http.StatusNotModified was returned. -func (c *ManagedZonesUpdateCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +// Do executes the "dns.resourceRecordSets.list" call. +// Exactly one of *ResourceRecordSetsListResponse or error will be +// non-nil. Any non-2xx status code is an error. Response headers are in +// either *ResourceRecordSetsListResponse.ServerResponse.Header or (if a +// response was returned at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *ResourceRecordSetsListCall) Do(opts ...googleapi.CallOption) (*ResourceRecordSetsListResponse, error) { gensupport.SetOptions(c.urlParams_, opts...) 
res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -4224,7 +7740,7 @@ func (c *ManagedZonesUpdateCall) Do(opts ...googleapi.CallOption) (*Operation, e if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &Operation{ + ret := &ResourceRecordSetsListResponse{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -4236,66 +7752,112 @@ func (c *ManagedZonesUpdateCall) Do(opts ...googleapi.CallOption) (*Operation, e } return ret, nil // { - // "description": "Updates an existing ManagedZone.", - // "flatPath": "dns/v1/projects/{project}/managedZones/{managedZone}", - // "httpMethod": "PUT", - // "id": "dns.managedZones.update", + // "description": "Enumerates ResourceRecordSets that you have created but not yet deleted.", + // "flatPath": "dns/v1/projects/{project}/managedZones/{managedZone}/rrsets", + // "httpMethod": "GET", + // "id": "dns.resourceRecordSets.list", // "parameterOrder": [ // "project", // "managedZone" // ], // "parameters": { - // "clientOperationId": { - // "description": "For mutating operation requests only. An optional identifier specified by the client. Must be unique for operation resources in the Operations collection.", - // "location": "query", - // "type": "string" - // }, // "managedZone": { // "description": "Identifies the managed zone addressed by this request. Can be the managed zone name or ID.", // "location": "path", // "required": true, // "type": "string" // }, + // "maxResults": { + // "description": "Optional. Maximum number of results to be returned. If unspecified, the server decides how many results to return.", + // "format": "int32", + // "location": "query", + // "type": "integer" + // }, + // "name": { + // "description": "Restricts the list to return only records with this fully qualified domain name.", + // "location": "query", + // "type": "string" + // }, + // "pageToken": { + // "description": "Optional. A tag returned by a previous list request that was truncated. Use this parameter to continue a previous list request.", + // "location": "query", + // "type": "string" + // }, // "project": { // "description": "Identifies the project addressed by this request.", // "location": "path", // "required": true, // "type": "string" + // }, + // "type": { + // "description": "Restricts the list to return only records of this type. If present, the \"name\" parameter must also be present.", + // "location": "query", + // "type": "string" // } // }, - // "path": "dns/v1/projects/{project}/managedZones/{managedZone}", - // "request": { - // "$ref": "ManagedZone" - // }, + // "path": "dns/v1/projects/{project}/managedZones/{managedZone}/rrsets", // "response": { - // "$ref": "Operation" + // "$ref": "ResourceRecordSetsListResponse" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/cloud-platform.read-only", + // "https://www.googleapis.com/auth/ndev.clouddns.readonly", // "https://www.googleapis.com/auth/ndev.clouddns.readwrite" // ] // } } -// method id "dns.policies.create": +// Pages invokes f for each page of results. +// A non-nil error returned from f will halt the iteration. +// The provided context supersedes any context provided to the Context method. 
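// Annotation (not part of the vendored file): a minimal sketch of how the
// newly added rrsets List call and its Pages helper might be used, assuming
// the generated dns/v1 package ("google.golang.org/api/dns/v1") and
// placeholder project/zone names:
//
//	svc, err := dns.NewService(ctx)
//	if err != nil {
//		return err
//	}
//	err = svc.ResourceRecordSets.List("my-project", "my-zone").
//		Pages(ctx, func(page *dns.ResourceRecordSetsListResponse) error {
//			for _, rrset := range page.Rrsets {
//				fmt.Println(rrset.Name, rrset.Type, rrset.Ttl)
//			}
//			return nil // a non-nil error would stop the iteration
//		})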
+func (c *ResourceRecordSetsListCall) Pages(ctx context.Context, f func(*ResourceRecordSetsListResponse) error) error { + c.ctx_ = ctx + defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point + for { + x, err := c.Do() + if err != nil { + return err + } + if err := f(x); err != nil { + return err + } + if x.NextPageToken == "" { + return nil + } + c.PageToken(x.NextPageToken) + } +} -type PoliciesCreateCall struct { - s *Service - project string - policy *Policy - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +// method id "dns.resourceRecordSets.patch": + +type ResourceRecordSetsPatchCall struct { + s *Service + project string + managedZone string + name string + type_ string + resourcerecordset *ResourceRecordSet + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// Create: Creates a new Policy. +// Patch: Applies a partial update to an existing ResourceRecordSet. // +// - managedZone: Identifies the managed zone addressed by this request. +// Can be the managed zone name or ID. +// - name: Fully qualified domain name. // - project: Identifies the project addressed by this request. -func (r *PoliciesService) Create(project string, policy *Policy) *PoliciesCreateCall { - c := &PoliciesCreateCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// - type: RRSet type. +func (r *ResourceRecordSetsService) Patch(project string, managedZone string, name string, type_ string, resourcerecordset *ResourceRecordSet) *ResourceRecordSetsPatchCall { + c := &ResourceRecordSetsPatchCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.policy = policy + c.managedZone = managedZone + c.name = name + c.type_ = type_ + c.resourcerecordset = resourcerecordset return c } @@ -4303,7 +7865,7 @@ func (r *PoliciesService) Create(project string, policy *Policy) *PoliciesCreate // For mutating operation requests only. An optional identifier // specified by the client. Must be unique for operation resources in // the Operations collection. -func (c *PoliciesCreateCall) ClientOperationId(clientOperationId string) *PoliciesCreateCall { +func (c *ResourceRecordSetsPatchCall) ClientOperationId(clientOperationId string) *ResourceRecordSetsPatchCall { c.urlParams_.Set("clientOperationId", clientOperationId) return c } @@ -4311,7 +7873,7 @@ func (c *PoliciesCreateCall) ClientOperationId(clientOperationId string) *Polici // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *PoliciesCreateCall) Fields(s ...googleapi.Field) *PoliciesCreateCall { +func (c *ResourceRecordSetsPatchCall) Fields(s ...googleapi.Field) *ResourceRecordSetsPatchCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -4319,56 +7881,59 @@ func (c *PoliciesCreateCall) Fields(s ...googleapi.Field) *PoliciesCreateCall { // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *PoliciesCreateCall) Context(ctx context.Context) *PoliciesCreateCall { +func (c *ResourceRecordSetsPatchCall) Context(ctx context.Context) *ResourceRecordSetsPatchCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. 
-func (c *PoliciesCreateCall) Header() http.Header { +func (c *ResourceRecordSetsPatchCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *PoliciesCreateCall) doRequest(alt string) (*http.Response, error) { +func (c *ResourceRecordSetsPatchCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20211212") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.policy) + body, err := googleapi.WithoutDataWrapper.JSONReader(c.resourcerecordset) if err != nil { return nil, err } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "dns/v1/projects/{project}/policies") + urls := googleapi.ResolveRelative(c.s.BasePath, "dns/v1/projects/{project}/managedZones/{managedZone}/rrsets/{name}/{type}") urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("POST", urls, body) + req, err := http.NewRequest("PATCH", urls, body) if err != nil { return nil, err } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, + "project": c.project, + "managedZone": c.managedZone, + "name": c.name, + "type": c.type_, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "dns.policies.create" call. -// Exactly one of *Policy or error will be non-nil. Any non-2xx status -// code is an error. Response headers are in either -// *Policy.ServerResponse.Header or (if a response was returned at all) -// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to -// check whether the returned error was because http.StatusNotModified -// was returned. -func (c *PoliciesCreateCall) Do(opts ...googleapi.CallOption) (*Policy, error) { +// Do executes the "dns.resourceRecordSets.patch" call. +// Exactly one of *ResourceRecordSet or error will be non-nil. Any +// non-2xx status code is an error. Response headers are in either +// *ResourceRecordSet.ServerResponse.Header or (if a response was +// returned at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *ResourceRecordSetsPatchCall) Do(opts ...googleapi.CallOption) (*ResourceRecordSet, error) { gensupport.SetOptions(c.urlParams_, opts...) 
res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -4387,7 +7952,7 @@ func (c *PoliciesCreateCall) Do(opts ...googleapi.CallOption) (*Policy, error) { if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &Policy{ + ret := &ResourceRecordSet{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -4399,12 +7964,15 @@ func (c *PoliciesCreateCall) Do(opts ...googleapi.CallOption) (*Policy, error) { } return ret, nil // { - // "description": "Creates a new Policy.", - // "flatPath": "dns/v1/projects/{project}/policies", - // "httpMethod": "POST", - // "id": "dns.policies.create", + // "description": "Applies a partial update to an existing ResourceRecordSet.", + // "flatPath": "dns/v1/projects/{project}/managedZones/{managedZone}/rrsets/{name}/{type}", + // "httpMethod": "PATCH", + // "id": "dns.resourceRecordSets.patch", // "parameterOrder": [ - // "project" + // "project", + // "managedZone", + // "name", + // "type" // ], // "parameters": { // "clientOperationId": { @@ -4412,19 +7980,37 @@ func (c *PoliciesCreateCall) Do(opts ...googleapi.CallOption) (*Policy, error) { // "location": "query", // "type": "string" // }, + // "managedZone": { + // "description": "Identifies the managed zone addressed by this request. Can be the managed zone name or ID.", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "name": { + // "description": "Fully qualified domain name.", + // "location": "path", + // "required": true, + // "type": "string" + // }, // "project": { // "description": "Identifies the project addressed by this request.", // "location": "path", // "required": true, // "type": "string" + // }, + // "type": { + // "description": "RRSet type.", + // "location": "path", + // "required": true, + // "type": "string" // } // }, - // "path": "dns/v1/projects/{project}/policies", + // "path": "dns/v1/projects/{project}/managedZones/{managedZone}/rrsets/{name}/{type}", // "request": { - // "$ref": "Policy" + // "$ref": "ResourceRecordSet" // }, // "response": { - // "$ref": "Policy" + // "$ref": "ResourceRecordSet" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", @@ -4434,27 +8020,24 @@ func (c *PoliciesCreateCall) Do(opts ...googleapi.CallOption) (*Policy, error) { } -// method id "dns.policies.delete": +// method id "dns.responsePolicies.create": -type PoliciesDeleteCall struct { - s *Service - project string - policy string - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type ResponsePoliciesCreateCall struct { + s *Service + project string + responsepolicy *ResponsePolicy + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// Delete: Deletes a previously created Policy. Fails if the policy is -// still being referenced by a network. +// Create: Creates a new Response Policy // -// - policy: User given friendly name of the policy addressed by this -// request. // - project: Identifies the project addressed by this request. 
-func (r *PoliciesService) Delete(project string, policy string) *PoliciesDeleteCall { - c := &PoliciesDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} +func (r *ResponsePoliciesService) Create(project string, responsepolicy *ResponsePolicy) *ResponsePoliciesCreateCall { + c := &ResponsePoliciesCreateCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.policy = policy + c.responsepolicy = responsepolicy return c } @@ -4462,7 +8045,7 @@ func (r *PoliciesService) Delete(project string, policy string) *PoliciesDeleteC // For mutating operation requests only. An optional identifier // specified by the client. Must be unique for operation resources in // the Operations collection. -func (c *PoliciesDeleteCall) ClientOperationId(clientOperationId string) *PoliciesDeleteCall { +func (c *ResponsePoliciesCreateCall) ClientOperationId(clientOperationId string) *ResponsePoliciesCreateCall { c.urlParams_.Set("clientOperationId", clientOperationId) return c } @@ -4470,7 +8053,7 @@ func (c *PoliciesDeleteCall) ClientOperationId(clientOperationId string) *Polici // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *PoliciesDeleteCall) Fields(s ...googleapi.Field) *PoliciesDeleteCall { +func (c *ResponsePoliciesCreateCall) Fields(s ...googleapi.Field) *ResponsePoliciesCreateCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -4478,64 +8061,92 @@ func (c *PoliciesDeleteCall) Fields(s ...googleapi.Field) *PoliciesDeleteCall { // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *PoliciesDeleteCall) Context(ctx context.Context) *PoliciesDeleteCall { +func (c *ResponsePoliciesCreateCall) Context(ctx context.Context) *ResponsePoliciesCreateCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *PoliciesDeleteCall) Header() http.Header { +func (c *ResponsePoliciesCreateCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *PoliciesDeleteCall) doRequest(alt string) (*http.Response, error) { +func (c *ResponsePoliciesCreateCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20211212") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.responsepolicy) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "dns/v1/projects/{project}/policies/{policy}") + urls := googleapi.ResolveRelative(c.s.BasePath, "dns/v1/projects/{project}/responsePolicies") urls += "?" 
+ c.urlParams_.Encode() - req, err := http.NewRequest("DELETE", urls, body) + req, err := http.NewRequest("POST", urls, body) if err != nil { return nil, err } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ "project": c.project, - "policy": c.policy, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "dns.policies.delete" call. -func (c *PoliciesDeleteCall) Do(opts ...googleapi.CallOption) error { +// Do executes the "dns.responsePolicies.create" call. +// Exactly one of *ResponsePolicy or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *ResponsePolicy.ServerResponse.Header or (if a response was returned +// at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *ResponsePoliciesCreateCall) Do(opts ...googleapi.CallOption) (*ResponsePolicy, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } if err != nil { - return err + return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return err + return nil, err } - return nil + ret := &ResponsePolicy{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil // { - // "description": "Deletes a previously created Policy. Fails if the policy is still being referenced by a network.", - // "flatPath": "dns/v1/projects/{project}/policies/{policy}", - // "httpMethod": "DELETE", - // "id": "dns.policies.delete", + // "description": "Creates a new Response Policy", + // "flatPath": "dns/v1/projects/{project}/responsePolicies", + // "httpMethod": "POST", + // "id": "dns.responsePolicies.create", // "parameterOrder": [ - // "project", - // "policy" + // "project" // ], // "parameters": { // "clientOperationId": { @@ -4543,12 +8154,6 @@ func (c *PoliciesDeleteCall) Do(opts ...googleapi.CallOption) error { // "location": "query", // "type": "string" // }, - // "policy": { - // "description": "User given friendly name of the policy addressed by this request.", - // "location": "path", - // "required": true, - // "type": "string" - // }, // "project": { // "description": "Identifies the project addressed by this request.", // "location": "path", @@ -4556,7 +8161,13 @@ func (c *PoliciesDeleteCall) Do(opts ...googleapi.CallOption) error { // "type": "string" // } // }, - // "path": "dns/v1/projects/{project}/policies/{policy}", + // "path": "dns/v1/projects/{project}/responsePolicies", + // "request": { + // "$ref": "ResponsePolicy" + // }, + // "response": { + // "$ref": "ResponsePolicy" + // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", // "https://www.googleapis.com/auth/ndev.clouddns.readwrite" @@ -4565,27 +8176,27 @@ func (c *PoliciesDeleteCall) Do(opts ...googleapi.CallOption) error { } -// method id "dns.policies.get": +// method id "dns.responsePolicies.delete": -type PoliciesGetCall struct { - s *Service - project string - policy string - urlParams_ gensupport.URLParams - ifNoneMatch_ string - ctx_ context.Context - header_ http.Header +type ResponsePoliciesDeleteCall 
struct { + s *Service + project string + responsePolicy string + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// Get: Fetches the representation of an existing Policy. +// Delete: Deletes a previously created Response Policy. Fails if the +// response policy is non-empty or still being referenced by a network. // -// - policy: User given friendly name of the policy addressed by this -// request. // - project: Identifies the project addressed by this request. -func (r *PoliciesService) Get(project string, policy string) *PoliciesGetCall { - c := &PoliciesGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// - responsePolicy: User assigned name of the Response Policy addressed +// by this request. +func (r *ResponsePoliciesService) Delete(project string, responsePolicy string) *ResponsePoliciesDeleteCall { + c := &ResponsePoliciesDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.policy = policy + c.responsePolicy = responsePolicy return c } @@ -4593,7 +8204,7 @@ func (r *PoliciesService) Get(project string, policy string) *PoliciesGetCall { // For mutating operation requests only. An optional identifier // specified by the client. Must be unique for operation resources in // the Operations collection. -func (c *PoliciesGetCall) ClientOperationId(clientOperationId string) *PoliciesGetCall { +func (c *ResponsePoliciesDeleteCall) ClientOperationId(clientOperationId string) *ResponsePoliciesDeleteCall { c.urlParams_.Set("clientOperationId", clientOperationId) return c } @@ -4601,110 +8212,72 @@ func (c *PoliciesGetCall) ClientOperationId(clientOperationId string) *PoliciesG // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *PoliciesGetCall) Fields(s ...googleapi.Field) *PoliciesGetCall { +func (c *ResponsePoliciesDeleteCall) Fields(s ...googleapi.Field) *ResponsePoliciesDeleteCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } -// IfNoneMatch sets the optional parameter which makes the operation -// fail if the object's ETag matches the given value. This is useful for -// getting updates only after the object has changed since the last -// request. Use googleapi.IsNotModified to check whether the response -// error from Do is the result of In-None-Match. -func (c *PoliciesGetCall) IfNoneMatch(entityTag string) *PoliciesGetCall { - c.ifNoneMatch_ = entityTag - return c -} - // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *PoliciesGetCall) Context(ctx context.Context) *PoliciesGetCall { +func (c *ResponsePoliciesDeleteCall) Context(ctx context.Context) *ResponsePoliciesDeleteCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. 
-func (c *PoliciesGetCall) Header() http.Header { +func (c *ResponsePoliciesDeleteCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *PoliciesGetCall) doRequest(alt string) (*http.Response, error) { +func (c *ResponsePoliciesDeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20211212") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) - if c.ifNoneMatch_ != "" { - reqHeaders.Set("If-None-Match", c.ifNoneMatch_) - } var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "dns/v1/projects/{project}/policies/{policy}") + urls := googleapi.ResolveRelative(c.s.BasePath, "dns/v1/projects/{project}/responsePolicies/{responsePolicy}") urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("GET", urls, body) + req, err := http.NewRequest("DELETE", urls, body) if err != nil { return nil, err } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "policy": c.policy, + "project": c.project, + "responsePolicy": c.responsePolicy, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "dns.policies.get" call. -// Exactly one of *Policy or error will be non-nil. Any non-2xx status -// code is an error. Response headers are in either -// *Policy.ServerResponse.Header or (if a response was returned at all) -// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to -// check whether the returned error was because http.StatusNotModified -// was returned. -func (c *PoliciesGetCall) Do(opts ...googleapi.CallOption) (*Policy, error) { +// Do executes the "dns.responsePolicies.delete" call. +func (c *ResponsePoliciesDeleteCall) Do(opts ...googleapi.CallOption) error { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") - if res != nil && res.StatusCode == http.StatusNotModified { - if res.Body != nil { - res.Body.Close() - } - return nil, &googleapi.Error{ - Code: res.StatusCode, - Header: res.Header, - } - } if err != nil { - return nil, err + return err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err - } - ret := &Policy{ - ServerResponse: googleapi.ServerResponse{ - Header: res.Header, - HTTPStatusCode: res.StatusCode, - }, - } - target := &ret - if err := gensupport.DecodeResponse(target, res); err != nil { - return nil, err + return err } - return ret, nil + return nil // { - // "description": "Fetches the representation of an existing Policy.", - // "flatPath": "dns/v1/projects/{project}/policies/{policy}", - // "httpMethod": "GET", - // "id": "dns.policies.get", + // "description": "Deletes a previously created Response Policy. 
Fails if the response policy is non-empty or still being referenced by a network.", + // "flatPath": "dns/v1/projects/{project}/responsePolicies/{responsePolicy}", + // "httpMethod": "DELETE", + // "id": "dns.responsePolicies.delete", // "parameterOrder": [ // "project", - // "policy" + // "responsePolicy" // ], // "parameters": { // "clientOperationId": { @@ -4712,73 +8285,65 @@ func (c *PoliciesGetCall) Do(opts ...googleapi.CallOption) (*Policy, error) { // "location": "query", // "type": "string" // }, - // "policy": { - // "description": "User given friendly name of the policy addressed by this request.", + // "project": { + // "description": "Identifies the project addressed by this request.", // "location": "path", // "required": true, // "type": "string" // }, - // "project": { - // "description": "Identifies the project addressed by this request.", + // "responsePolicy": { + // "description": "User assigned name of the Response Policy addressed by this request.", // "location": "path", // "required": true, // "type": "string" // } // }, - // "path": "dns/v1/projects/{project}/policies/{policy}", - // "response": { - // "$ref": "Policy" - // }, + // "path": "dns/v1/projects/{project}/responsePolicies/{responsePolicy}", // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/cloud-platform.read-only", - // "https://www.googleapis.com/auth/ndev.clouddns.readonly", // "https://www.googleapis.com/auth/ndev.clouddns.readwrite" // ] // } } -// method id "dns.policies.list": - -type PoliciesListCall struct { - s *Service - project string - urlParams_ gensupport.URLParams - ifNoneMatch_ string - ctx_ context.Context - header_ http.Header -} +// method id "dns.responsePolicies.get": -// List: Enumerates all Policies associated with a project. -// -// - project: Identifies the project addressed by this request. -func (r *PoliciesService) List(project string) *PoliciesListCall { - c := &PoliciesListCall{s: r.s, urlParams_: make(gensupport.URLParams)} - c.project = project - return c +type ResponsePoliciesGetCall struct { + s *Service + project string + responsePolicy string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header } -// MaxResults sets the optional parameter "maxResults": Maximum number -// of results to be returned. If unspecified, the server decides how -// many results to return. -func (c *PoliciesListCall) MaxResults(maxResults int64) *PoliciesListCall { - c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) +// Get: Fetches the representation of an existing Response Policy. +// +// - project: Identifies the project addressed by this request. +// - responsePolicy: User assigned name of the Response Policy addressed +// by this request. +func (r *ResponsePoliciesService) Get(project string, responsePolicy string) *ResponsePoliciesGetCall { + c := &ResponsePoliciesGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.responsePolicy = responsePolicy return c } -// PageToken sets the optional parameter "pageToken": A tag returned by -// a previous list request that was truncated. Use this parameter to -// continue a previous list request. -func (c *PoliciesListCall) PageToken(pageToken string) *PoliciesListCall { - c.urlParams_.Set("pageToken", pageToken) +// ClientOperationId sets the optional parameter "clientOperationId": +// For mutating operation requests only. An optional identifier +// specified by the client. 
Must be unique for operation resources in +// the Operations collection. +func (c *ResponsePoliciesGetCall) ClientOperationId(clientOperationId string) *ResponsePoliciesGetCall { + c.urlParams_.Set("clientOperationId", clientOperationId) return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *PoliciesListCall) Fields(s ...googleapi.Field) *PoliciesListCall { +func (c *ResponsePoliciesGetCall) Fields(s ...googleapi.Field) *ResponsePoliciesGetCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -4788,7 +8353,7 @@ func (c *PoliciesListCall) Fields(s ...googleapi.Field) *PoliciesListCall { // getting updates only after the object has changed since the last // request. Use googleapi.IsNotModified to check whether the response // error from Do is the result of In-None-Match. -func (c *PoliciesListCall) IfNoneMatch(entityTag string) *PoliciesListCall { +func (c *ResponsePoliciesGetCall) IfNoneMatch(entityTag string) *ResponsePoliciesGetCall { c.ifNoneMatch_ = entityTag return c } @@ -4796,23 +8361,23 @@ func (c *PoliciesListCall) IfNoneMatch(entityTag string) *PoliciesListCall { // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *PoliciesListCall) Context(ctx context.Context) *PoliciesListCall { +func (c *ResponsePoliciesGetCall) Context(ctx context.Context) *ResponsePoliciesGetCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *PoliciesListCall) Header() http.Header { +func (c *ResponsePoliciesGetCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *PoliciesListCall) doRequest(alt string) (*http.Response, error) { +func (c *ResponsePoliciesGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20211212") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) for k, v := range c.header_ { reqHeaders[k] = v } @@ -4823,7 +8388,7 @@ func (c *PoliciesListCall) doRequest(alt string) (*http.Response, error) { var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "dns/v1/projects/{project}/policies") + urls := googleapi.ResolveRelative(c.s.BasePath, "dns/v1/projects/{project}/responsePolicies/{responsePolicy}") urls += "?" + c.urlParams_.Encode() req, err := http.NewRequest("GET", urls, body) if err != nil { @@ -4831,19 +8396,20 @@ func (c *PoliciesListCall) doRequest(alt string) (*http.Response, error) { } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, + "project": c.project, + "responsePolicy": c.responsePolicy, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "dns.policies.list" call. -// Exactly one of *PoliciesListResponse or error will be non-nil. Any -// non-2xx status code is an error. Response headers are in either -// *PoliciesListResponse.ServerResponse.Header or (if a response was -// returned at all) in error.(*googleapi.Error).Header. Use +// Do executes the "dns.responsePolicies.get" call. +// Exactly one of *ResponsePolicy or error will be non-nil. 
Any non-2xx +// status code is an error. Response headers are in either +// *ResponsePolicy.ServerResponse.Header or (if a response was returned +// at all) in error.(*googleapi.Error).Header. Use // googleapi.IsNotModified to check whether the returned error was // because http.StatusNotModified was returned. -func (c *PoliciesListCall) Do(opts ...googleapi.CallOption) (*PoliciesListResponse, error) { +func (c *ResponsePoliciesGetCall) Do(opts ...googleapi.CallOption) (*ResponsePolicy, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -4862,7 +8428,7 @@ func (c *PoliciesListCall) Do(opts ...googleapi.CallOption) (*PoliciesListRespon if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &PoliciesListResponse{ + ret := &ResponsePolicy{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -4874,22 +8440,17 @@ func (c *PoliciesListCall) Do(opts ...googleapi.CallOption) (*PoliciesListRespon } return ret, nil // { - // "description": "Enumerates all Policies associated with a project.", - // "flatPath": "dns/v1/projects/{project}/policies", + // "description": "Fetches the representation of an existing Response Policy.", + // "flatPath": "dns/v1/projects/{project}/responsePolicies/{responsePolicy}", // "httpMethod": "GET", - // "id": "dns.policies.list", + // "id": "dns.responsePolicies.get", // "parameterOrder": [ - // "project" + // "project", + // "responsePolicy" // ], // "parameters": { - // "maxResults": { - // "description": "Optional. Maximum number of results to be returned. If unspecified, the server decides how many results to return.", - // "format": "int32", - // "location": "query", - // "type": "integer" - // }, - // "pageToken": { - // "description": "Optional. A tag returned by a previous list request that was truncated. Use this parameter to continue a previous list request.", + // "clientOperationId": { + // "description": "For mutating operation requests only. An optional identifier specified by the client. Must be unique for operation resources in the Operations collection.", // "location": "query", // "type": "string" // }, @@ -4898,11 +8459,17 @@ func (c *PoliciesListCall) Do(opts ...googleapi.CallOption) (*PoliciesListRespon // "location": "path", // "required": true, // "type": "string" + // }, + // "responsePolicy": { + // "description": "User assigned name of the Response Policy addressed by this request.", + // "location": "path", + // "required": true, + // "type": "string" // } // }, - // "path": "dns/v1/projects/{project}/policies", + // "path": "dns/v1/projects/{project}/responsePolicies/{responsePolicy}", // "response": { - // "$ref": "PoliciesListResponse" + // "$ref": "ResponsePolicy" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", @@ -4914,123 +8481,111 @@ func (c *PoliciesListCall) Do(opts ...googleapi.CallOption) (*PoliciesListRespon } -// Pages invokes f for each page of results. -// A non-nil error returned from f will halt the iteration. -// The provided context supersedes any context provided to the Context method. 
-func (c *PoliciesListCall) Pages(ctx context.Context, f func(*PoliciesListResponse) error) error { - c.ctx_ = ctx - defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point - for { - x, err := c.Do() - if err != nil { - return err - } - if err := f(x); err != nil { - return err - } - if x.NextPageToken == "" { - return nil - } - c.PageToken(x.NextPageToken) - } -} - -// method id "dns.policies.patch": +// method id "dns.responsePolicies.list": -type PoliciesPatchCall struct { - s *Service - project string - policy string - policy2 *Policy - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type ResponsePoliciesListCall struct { + s *Service + project string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header } -// Patch: Applies a partial update to an existing Policy. +// List: Enumerates all Response Policies associated with a project. // -// - policy: User given friendly name of the policy addressed by this -// request. // - project: Identifies the project addressed by this request. -func (r *PoliciesService) Patch(project string, policy string, policy2 *Policy) *PoliciesPatchCall { - c := &PoliciesPatchCall{s: r.s, urlParams_: make(gensupport.URLParams)} +func (r *ResponsePoliciesService) List(project string) *ResponsePoliciesListCall { + c := &ResponsePoliciesListCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.policy = policy - c.policy2 = policy2 return c } -// ClientOperationId sets the optional parameter "clientOperationId": -// For mutating operation requests only. An optional identifier -// specified by the client. Must be unique for operation resources in -// the Operations collection. -func (c *PoliciesPatchCall) ClientOperationId(clientOperationId string) *PoliciesPatchCall { - c.urlParams_.Set("clientOperationId", clientOperationId) +// MaxResults sets the optional parameter "maxResults": Maximum number +// of results to be returned. If unspecified, the server decides how +// many results to return. +func (c *ResponsePoliciesListCall) MaxResults(maxResults int64) *ResponsePoliciesListCall { + c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) + return c +} + +// PageToken sets the optional parameter "pageToken": A tag returned by +// a previous list request that was truncated. Use this parameter to +// continue a previous list request. +func (c *ResponsePoliciesListCall) PageToken(pageToken string) *ResponsePoliciesListCall { + c.urlParams_.Set("pageToken", pageToken) return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *PoliciesPatchCall) Fields(s ...googleapi.Field) *PoliciesPatchCall { +func (c *ResponsePoliciesListCall) Fields(s ...googleapi.Field) *ResponsePoliciesListCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. This is useful for +// getting updates only after the object has changed since the last +// request. Use googleapi.IsNotModified to check whether the response +// error from Do is the result of In-None-Match. +func (c *ResponsePoliciesListCall) IfNoneMatch(entityTag string) *ResponsePoliciesListCall { + c.ifNoneMatch_ = entityTag + return c +} + // Context sets the context to be used in this call's Do method. 
Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *PoliciesPatchCall) Context(ctx context.Context) *PoliciesPatchCall { +func (c *ResponsePoliciesListCall) Context(ctx context.Context) *ResponsePoliciesListCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *PoliciesPatchCall) Header() http.Header { +func (c *ResponsePoliciesListCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *PoliciesPatchCall) doRequest(alt string) (*http.Response, error) { +func (c *ResponsePoliciesListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20211212") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) - var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.policy2) - if err != nil { - return nil, err + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } - reqHeaders.Set("Content-Type", "application/json") + var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "dns/v1/projects/{project}/policies/{policy}") + urls := googleapi.ResolveRelative(c.s.BasePath, "dns/v1/projects/{project}/responsePolicies") urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("PATCH", urls, body) + req, err := http.NewRequest("GET", urls, body) if err != nil { return nil, err } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ "project": c.project, - "policy": c.policy, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "dns.policies.patch" call. -// Exactly one of *PoliciesPatchResponse or error will be non-nil. Any -// non-2xx status code is an error. Response headers are in either -// *PoliciesPatchResponse.ServerResponse.Header or (if a response was -// returned at all) in error.(*googleapi.Error).Header. Use +// Do executes the "dns.responsePolicies.list" call. +// Exactly one of *ResponsePoliciesListResponse or error will be +// non-nil. Any non-2xx status code is an error. Response headers are in +// either *ResponsePoliciesListResponse.ServerResponse.Header or (if a +// response was returned at all) in error.(*googleapi.Error).Header. Use // googleapi.IsNotModified to check whether the returned error was // because http.StatusNotModified was returned. -func (c *PoliciesPatchCall) Do(opts ...googleapi.CallOption) (*PoliciesPatchResponse, error) { +func (c *ResponsePoliciesListCall) Do(opts ...googleapi.CallOption) (*ResponsePoliciesListResponse, error) { gensupport.SetOptions(c.urlParams_, opts...) 
res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -5049,7 +8604,7 @@ func (c *PoliciesPatchCall) Do(opts ...googleapi.CallOption) (*PoliciesPatchResp if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &PoliciesPatchResponse{ + ret := &ResponsePoliciesListResponse{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -5061,24 +8616,23 @@ func (c *PoliciesPatchCall) Do(opts ...googleapi.CallOption) (*PoliciesPatchResp } return ret, nil // { - // "description": "Applies a partial update to an existing Policy.", - // "flatPath": "dns/v1/projects/{project}/policies/{policy}", - // "httpMethod": "PATCH", - // "id": "dns.policies.patch", + // "description": "Enumerates all Response Policies associated with a project.", + // "flatPath": "dns/v1/projects/{project}/responsePolicies", + // "httpMethod": "GET", + // "id": "dns.responsePolicies.list", // "parameterOrder": [ - // "project", - // "policy" + // "project" // ], // "parameters": { - // "clientOperationId": { - // "description": "For mutating operation requests only. An optional identifier specified by the client. Must be unique for operation resources in the Operations collection.", + // "maxResults": { + // "description": "Optional. Maximum number of results to be returned. If unspecified, the server decides how many results to return.", + // "format": "int32", // "location": "query", - // "type": "string" + // "type": "integer" // }, - // "policy": { - // "description": "User given friendly name of the policy addressed by this request.", - // "location": "path", - // "required": true, + // "pageToken": { + // "description": "Optional. A tag returned by a previous list request that was truncated. Use this parameter to continue a previous list request.", + // "location": "query", // "type": "string" // }, // "project": { @@ -5088,43 +8642,63 @@ func (c *PoliciesPatchCall) Do(opts ...googleapi.CallOption) (*PoliciesPatchResp // "type": "string" // } // }, - // "path": "dns/v1/projects/{project}/policies/{policy}", - // "request": { - // "$ref": "Policy" - // }, + // "path": "dns/v1/projects/{project}/responsePolicies", // "response": { - // "$ref": "PoliciesPatchResponse" + // "$ref": "ResponsePoliciesListResponse" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/cloud-platform.read-only", + // "https://www.googleapis.com/auth/ndev.clouddns.readonly", // "https://www.googleapis.com/auth/ndev.clouddns.readwrite" // ] // } } -// method id "dns.policies.update": +// Pages invokes f for each page of results. +// A non-nil error returned from f will halt the iteration. +// The provided context supersedes any context provided to the Context method. 
+func (c *ResponsePoliciesListCall) Pages(ctx context.Context, f func(*ResponsePoliciesListResponse) error) error { + c.ctx_ = ctx + defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point + for { + x, err := c.Do() + if err != nil { + return err + } + if err := f(x); err != nil { + return err + } + if x.NextPageToken == "" { + return nil + } + c.PageToken(x.NextPageToken) + } +} -type PoliciesUpdateCall struct { - s *Service - project string - policy string - policy2 *Policy - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +// method id "dns.responsePolicies.patch": + +type ResponsePoliciesPatchCall struct { + s *Service + project string + responsePolicy string + responsepolicy *ResponsePolicy + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// Update: Updates an existing Policy. +// Patch: Applies a partial update to an existing Response Policy. // -// - policy: User given friendly name of the policy addressed by this -// request. // - project: Identifies the project addressed by this request. -func (r *PoliciesService) Update(project string, policy string, policy2 *Policy) *PoliciesUpdateCall { - c := &PoliciesUpdateCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// - responsePolicy: User assigned name of the Respones Policy addressed +// by this request. +func (r *ResponsePoliciesService) Patch(project string, responsePolicy string, responsepolicy *ResponsePolicy) *ResponsePoliciesPatchCall { + c := &ResponsePoliciesPatchCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.policy = policy - c.policy2 = policy2 + c.responsePolicy = responsePolicy + c.responsepolicy = responsepolicy return c } @@ -5132,7 +8706,7 @@ func (r *PoliciesService) Update(project string, policy string, policy2 *Policy) // For mutating operation requests only. An optional identifier // specified by the client. Must be unique for operation resources in // the Operations collection. -func (c *PoliciesUpdateCall) ClientOperationId(clientOperationId string) *PoliciesUpdateCall { +func (c *ResponsePoliciesPatchCall) ClientOperationId(clientOperationId string) *ResponsePoliciesPatchCall { c.urlParams_.Set("clientOperationId", clientOperationId) return c } @@ -5140,7 +8714,7 @@ func (c *PoliciesUpdateCall) ClientOperationId(clientOperationId string) *Polici // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *PoliciesUpdateCall) Fields(s ...googleapi.Field) *PoliciesUpdateCall { +func (c *ResponsePoliciesPatchCall) Fields(s ...googleapi.Field) *ResponsePoliciesPatchCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -5148,57 +8722,57 @@ func (c *PoliciesUpdateCall) Fields(s ...googleapi.Field) *PoliciesUpdateCall { // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *PoliciesUpdateCall) Context(ctx context.Context) *PoliciesUpdateCall { +func (c *ResponsePoliciesPatchCall) Context(ctx context.Context) *ResponsePoliciesPatchCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. 
-func (c *PoliciesUpdateCall) Header() http.Header { +func (c *ResponsePoliciesPatchCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *PoliciesUpdateCall) doRequest(alt string) (*http.Response, error) { +func (c *ResponsePoliciesPatchCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20211212") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.policy2) + body, err := googleapi.WithoutDataWrapper.JSONReader(c.responsepolicy) if err != nil { return nil, err } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "dns/v1/projects/{project}/policies/{policy}") + urls := googleapi.ResolveRelative(c.s.BasePath, "dns/v1/projects/{project}/responsePolicies/{responsePolicy}") urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("PUT", urls, body) + req, err := http.NewRequest("PATCH", urls, body) if err != nil { return nil, err } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "policy": c.policy, + "project": c.project, + "responsePolicy": c.responsePolicy, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "dns.policies.update" call. -// Exactly one of *PoliciesUpdateResponse or error will be non-nil. Any -// non-2xx status code is an error. Response headers are in either -// *PoliciesUpdateResponse.ServerResponse.Header or (if a response was -// returned at all) in error.(*googleapi.Error).Header. Use +// Do executes the "dns.responsePolicies.patch" call. +// Exactly one of *ResponsePoliciesPatchResponse or error will be +// non-nil. Any non-2xx status code is an error. Response headers are in +// either *ResponsePoliciesPatchResponse.ServerResponse.Header or (if a +// response was returned at all) in error.(*googleapi.Error).Header. Use // googleapi.IsNotModified to check whether the returned error was // because http.StatusNotModified was returned. -func (c *PoliciesUpdateCall) Do(opts ...googleapi.CallOption) (*PoliciesUpdateResponse, error) { +func (c *ResponsePoliciesPatchCall) Do(opts ...googleapi.CallOption) (*ResponsePoliciesPatchResponse, error) { gensupport.SetOptions(c.urlParams_, opts...) 
res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -5217,7 +8791,7 @@ func (c *PoliciesUpdateCall) Do(opts ...googleapi.CallOption) (*PoliciesUpdateRe if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &PoliciesUpdateResponse{ + ret := &ResponsePoliciesPatchResponse{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -5229,13 +8803,13 @@ func (c *PoliciesUpdateCall) Do(opts ...googleapi.CallOption) (*PoliciesUpdateRe } return ret, nil // { - // "description": "Updates an existing Policy.", - // "flatPath": "dns/v1/projects/{project}/policies/{policy}", - // "httpMethod": "PUT", - // "id": "dns.policies.update", + // "description": "Applies a partial update to an existing Response Policy.", + // "flatPath": "dns/v1/projects/{project}/responsePolicies/{responsePolicy}", + // "httpMethod": "PATCH", + // "id": "dns.responsePolicies.patch", // "parameterOrder": [ // "project", - // "policy" + // "responsePolicy" // ], // "parameters": { // "clientOperationId": { @@ -5243,25 +8817,25 @@ func (c *PoliciesUpdateCall) Do(opts ...googleapi.CallOption) (*PoliciesUpdateRe // "location": "query", // "type": "string" // }, - // "policy": { - // "description": "User given friendly name of the policy addressed by this request.", + // "project": { + // "description": "Identifies the project addressed by this request.", // "location": "path", // "required": true, // "type": "string" // }, - // "project": { - // "description": "Identifies the project addressed by this request.", + // "responsePolicy": { + // "description": "User assigned name of the Respones Policy addressed by this request.", // "location": "path", // "required": true, // "type": "string" // } // }, - // "path": "dns/v1/projects/{project}/policies/{policy}", + // "path": "dns/v1/projects/{project}/responsePolicies/{responsePolicy}", // "request": { - // "$ref": "Policy" + // "$ref": "ResponsePolicy" // }, // "response": { - // "$ref": "PoliciesUpdateResponse" + // "$ref": "ResponsePoliciesPatchResponse" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", @@ -5271,23 +8845,28 @@ func (c *PoliciesUpdateCall) Do(opts ...googleapi.CallOption) (*PoliciesUpdateRe } -// method id "dns.projects.get": +// method id "dns.responsePolicies.update": -type ProjectsGetCall struct { - s *Service - project string - urlParams_ gensupport.URLParams - ifNoneMatch_ string - ctx_ context.Context - header_ http.Header +type ResponsePoliciesUpdateCall struct { + s *Service + project string + responsePolicy string + responsepolicy *ResponsePolicy + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// Get: Fetches the representation of an existing Project. +// Update: Updates an existing Response Policy. // // - project: Identifies the project addressed by this request. -func (r *ProjectsService) Get(project string) *ProjectsGetCall { - c := &ProjectsGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// - responsePolicy: User assigned name of the Response Policy addressed +// by this request. 
+func (r *ResponsePoliciesService) Update(project string, responsePolicy string, responsepolicy *ResponsePolicy) *ResponsePoliciesUpdateCall { + c := &ResponsePoliciesUpdateCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project + c.responsePolicy = responsePolicy + c.responsepolicy = responsepolicy return c } @@ -5295,7 +8874,7 @@ func (r *ProjectsService) Get(project string) *ProjectsGetCall { // For mutating operation requests only. An optional identifier // specified by the client. Must be unique for operation resources in // the Operations collection. -func (c *ProjectsGetCall) ClientOperationId(clientOperationId string) *ProjectsGetCall { +func (c *ResponsePoliciesUpdateCall) ClientOperationId(clientOperationId string) *ResponsePoliciesUpdateCall { c.urlParams_.Set("clientOperationId", clientOperationId) return c } @@ -5303,72 +8882,65 @@ func (c *ProjectsGetCall) ClientOperationId(clientOperationId string) *ProjectsG // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *ProjectsGetCall) Fields(s ...googleapi.Field) *ProjectsGetCall { +func (c *ResponsePoliciesUpdateCall) Fields(s ...googleapi.Field) *ResponsePoliciesUpdateCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } -// IfNoneMatch sets the optional parameter which makes the operation -// fail if the object's ETag matches the given value. This is useful for -// getting updates only after the object has changed since the last -// request. Use googleapi.IsNotModified to check whether the response -// error from Do is the result of In-None-Match. -func (c *ProjectsGetCall) IfNoneMatch(entityTag string) *ProjectsGetCall { - c.ifNoneMatch_ = entityTag - return c -} - // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *ProjectsGetCall) Context(ctx context.Context) *ProjectsGetCall { +func (c *ResponsePoliciesUpdateCall) Context(ctx context.Context) *ResponsePoliciesUpdateCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *ProjectsGetCall) Header() http.Header { +func (c *ResponsePoliciesUpdateCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *ProjectsGetCall) doRequest(alt string) (*http.Response, error) { +func (c *ResponsePoliciesUpdateCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20211212") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) - if c.ifNoneMatch_ != "" { - reqHeaders.Set("If-None-Match", c.ifNoneMatch_) - } var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.responsepolicy) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "dns/v1/projects/{project}") + urls := googleapi.ResolveRelative(c.s.BasePath, "dns/v1/projects/{project}/responsePolicies/{responsePolicy}") urls += "?" 
+ c.urlParams_.Encode() - req, err := http.NewRequest("GET", urls, body) + req, err := http.NewRequest("PUT", urls, body) if err != nil { return nil, err } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, + "project": c.project, + "responsePolicy": c.responsePolicy, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "dns.projects.get" call. -// Exactly one of *Project or error will be non-nil. Any non-2xx status -// code is an error. Response headers are in either -// *Project.ServerResponse.Header or (if a response was returned at all) -// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to -// check whether the returned error was because http.StatusNotModified -// was returned. -func (c *ProjectsGetCall) Do(opts ...googleapi.CallOption) (*Project, error) { +// Do executes the "dns.responsePolicies.update" call. +// Exactly one of *ResponsePoliciesUpdateResponse or error will be +// non-nil. Any non-2xx status code is an error. Response headers are in +// either *ResponsePoliciesUpdateResponse.ServerResponse.Header or (if a +// response was returned at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *ResponsePoliciesUpdateCall) Do(opts ...googleapi.CallOption) (*ResponsePoliciesUpdateResponse, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -5387,7 +8959,7 @@ func (c *ProjectsGetCall) Do(opts ...googleapi.CallOption) (*Project, error) { if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &Project{ + ret := &ResponsePoliciesUpdateResponse{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -5399,12 +8971,13 @@ func (c *ProjectsGetCall) Do(opts ...googleapi.CallOption) (*Project, error) { } return ret, nil // { - // "description": "Fetches the representation of an existing Project.", - // "flatPath": "dns/v1/projects/{project}", - // "httpMethod": "GET", - // "id": "dns.projects.get", + // "description": "Updates an existing Response Policy.", + // "flatPath": "dns/v1/projects/{project}/responsePolicies/{responsePolicy}", + // "httpMethod": "PUT", + // "id": "dns.responsePolicies.update", // "parameterOrder": [ - // "project" + // "project", + // "responsePolicy" // ], // "parameters": { // "clientOperationId": { @@ -5417,44 +8990,51 @@ func (c *ProjectsGetCall) Do(opts ...googleapi.CallOption) (*Project, error) { // "location": "path", // "required": true, // "type": "string" + // }, + // "responsePolicy": { + // "description": "User assigned name of the Response Policy addressed by this request.", + // "location": "path", + // "required": true, + // "type": "string" // } // }, - // "path": "dns/v1/projects/{project}", + // "path": "dns/v1/projects/{project}/responsePolicies/{responsePolicy}", + // "request": { + // "$ref": "ResponsePolicy" + // }, // "response": { - // "$ref": "Project" + // "$ref": "ResponsePoliciesUpdateResponse" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/cloud-platform.read-only", - // "https://www.googleapis.com/auth/ndev.clouddns.readonly", // "https://www.googleapis.com/auth/ndev.clouddns.readwrite" // ] // } } -// method id "dns.resourceRecordSets.create": +// method id "dns.responsePolicyRules.create": -type 
ResourceRecordSetsCreateCall struct { - s *Service - project string - managedZone string - resourcerecordset *ResourceRecordSet - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type ResponsePolicyRulesCreateCall struct { + s *Service + project string + responsePolicy string + responsepolicyrule *ResponsePolicyRule + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// Create: Creates a new ResourceRecordSet. +// Create: Creates a new Response Policy Rule. // -// - managedZone: Identifies the managed zone addressed by this request. -// Can be the managed zone name or ID. // - project: Identifies the project addressed by this request. -func (r *ResourceRecordSetsService) Create(project string, managedZone string, resourcerecordset *ResourceRecordSet) *ResourceRecordSetsCreateCall { - c := &ResourceRecordSetsCreateCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// - responsePolicy: User assigned name of the Response Policy +// containing the Response Policy Rule. +func (r *ResponsePolicyRulesService) Create(project string, responsePolicy string, responsepolicyrule *ResponsePolicyRule) *ResponsePolicyRulesCreateCall { + c := &ResponsePolicyRulesCreateCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.managedZone = managedZone - c.resourcerecordset = resourcerecordset + c.responsePolicy = responsePolicy + c.responsepolicyrule = responsepolicyrule return c } @@ -5462,7 +9042,7 @@ func (r *ResourceRecordSetsService) Create(project string, managedZone string, r // For mutating operation requests only. An optional identifier // specified by the client. Must be unique for operation resources in // the Operations collection. -func (c *ResourceRecordSetsCreateCall) ClientOperationId(clientOperationId string) *ResourceRecordSetsCreateCall { +func (c *ResponsePolicyRulesCreateCall) ClientOperationId(clientOperationId string) *ResponsePolicyRulesCreateCall { c.urlParams_.Set("clientOperationId", clientOperationId) return c } @@ -5470,7 +9050,7 @@ func (c *ResourceRecordSetsCreateCall) ClientOperationId(clientOperationId strin // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *ResourceRecordSetsCreateCall) Fields(s ...googleapi.Field) *ResourceRecordSetsCreateCall { +func (c *ResponsePolicyRulesCreateCall) Fields(s ...googleapi.Field) *ResponsePolicyRulesCreateCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -5478,36 +9058,36 @@ func (c *ResourceRecordSetsCreateCall) Fields(s ...googleapi.Field) *ResourceRec // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *ResourceRecordSetsCreateCall) Context(ctx context.Context) *ResourceRecordSetsCreateCall { +func (c *ResponsePolicyRulesCreateCall) Context(ctx context.Context) *ResponsePolicyRulesCreateCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. 
-func (c *ResourceRecordSetsCreateCall) Header() http.Header { +func (c *ResponsePolicyRulesCreateCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *ResourceRecordSetsCreateCall) doRequest(alt string) (*http.Response, error) { +func (c *ResponsePolicyRulesCreateCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20211212") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.resourcerecordset) + body, err := googleapi.WithoutDataWrapper.JSONReader(c.responsepolicyrule) if err != nil { return nil, err } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "dns/v1/projects/{project}/managedZones/{managedZone}/rrsets") + urls := googleapi.ResolveRelative(c.s.BasePath, "dns/v1/projects/{project}/responsePolicies/{responsePolicy}/rules") urls += "?" + c.urlParams_.Encode() req, err := http.NewRequest("POST", urls, body) if err != nil { @@ -5515,20 +9095,20 @@ func (c *ResourceRecordSetsCreateCall) doRequest(alt string) (*http.Response, er } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "managedZone": c.managedZone, + "project": c.project, + "responsePolicy": c.responsePolicy, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "dns.resourceRecordSets.create" call. -// Exactly one of *ResourceRecordSet or error will be non-nil. Any +// Do executes the "dns.responsePolicyRules.create" call. +// Exactly one of *ResponsePolicyRule or error will be non-nil. Any // non-2xx status code is an error. Response headers are in either -// *ResourceRecordSet.ServerResponse.Header or (if a response was +// *ResponsePolicyRule.ServerResponse.Header or (if a response was // returned at all) in error.(*googleapi.Error).Header. Use // googleapi.IsNotModified to check whether the returned error was // because http.StatusNotModified was returned. -func (c *ResourceRecordSetsCreateCall) Do(opts ...googleapi.CallOption) (*ResourceRecordSet, error) { +func (c *ResponsePolicyRulesCreateCall) Do(opts ...googleapi.CallOption) (*ResponsePolicyRule, error) { gensupport.SetOptions(c.urlParams_, opts...) 
res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -5547,7 +9127,7 @@ func (c *ResourceRecordSetsCreateCall) Do(opts ...googleapi.CallOption) (*Resour if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &ResourceRecordSet{ + ret := &ResponsePolicyRule{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -5559,13 +9139,13 @@ func (c *ResourceRecordSetsCreateCall) Do(opts ...googleapi.CallOption) (*Resour } return ret, nil // { - // "description": "Creates a new ResourceRecordSet.", - // "flatPath": "dns/v1/projects/{project}/managedZones/{managedZone}/rrsets", + // "description": "Creates a new Response Policy Rule.", + // "flatPath": "dns/v1/projects/{project}/responsePolicies/{responsePolicy}/rules", // "httpMethod": "POST", - // "id": "dns.resourceRecordSets.create", + // "id": "dns.responsePolicyRules.create", // "parameterOrder": [ // "project", - // "managedZone" + // "responsePolicy" // ], // "parameters": { // "clientOperationId": { @@ -5573,25 +9153,25 @@ func (c *ResourceRecordSetsCreateCall) Do(opts ...googleapi.CallOption) (*Resour // "location": "query", // "type": "string" // }, - // "managedZone": { - // "description": "Identifies the managed zone addressed by this request. Can be the managed zone name or ID.", + // "project": { + // "description": "Identifies the project addressed by this request.", // "location": "path", // "required": true, // "type": "string" // }, - // "project": { - // "description": "Identifies the project addressed by this request.", + // "responsePolicy": { + // "description": "User assigned name of the Response Policy containing the Response Policy Rule.", // "location": "path", // "required": true, // "type": "string" // } // }, - // "path": "dns/v1/projects/{project}/managedZones/{managedZone}/rrsets", + // "path": "dns/v1/projects/{project}/responsePolicies/{responsePolicy}/rules", // "request": { - // "$ref": "ResourceRecordSet" + // "$ref": "ResponsePolicyRule" // }, // "response": { - // "$ref": "ResourceRecordSet" + // "$ref": "ResponsePolicyRule" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", @@ -5601,32 +9181,30 @@ func (c *ResourceRecordSetsCreateCall) Do(opts ...googleapi.CallOption) (*Resour } -// method id "dns.resourceRecordSets.delete": +// method id "dns.responsePolicyRules.delete": -type ResourceRecordSetsDeleteCall struct { - s *Service - project string - managedZone string - name string - type_ string - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type ResponsePolicyRulesDeleteCall struct { + s *Service + project string + responsePolicy string + responsePolicyRule string + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// Delete: Deletes a previously created ResourceRecordSet. +// Delete: Deletes a previously created Response Policy Rule. // -// - managedZone: Identifies the managed zone addressed by this request. -// Can be the managed zone name or ID. -// - name: Fully qualified domain name. // - project: Identifies the project addressed by this request. -// - type: RRSet type. -func (r *ResourceRecordSetsService) Delete(project string, managedZone string, name string, type_ string) *ResourceRecordSetsDeleteCall { - c := &ResourceRecordSetsDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// - responsePolicy: User assigned name of the Response Policy +// containing the Response Policy Rule. 
+// - responsePolicyRule: User assigned name of the Response Policy Rule +// addressed by this request. +func (r *ResponsePolicyRulesService) Delete(project string, responsePolicy string, responsePolicyRule string) *ResponsePolicyRulesDeleteCall { + c := &ResponsePolicyRulesDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.managedZone = managedZone - c.name = name - c.type_ = type_ + c.responsePolicy = responsePolicy + c.responsePolicyRule = responsePolicyRule return c } @@ -5634,7 +9212,7 @@ func (r *ResourceRecordSetsService) Delete(project string, managedZone string, n // For mutating operation requests only. An optional identifier // specified by the client. Must be unique for operation resources in // the Operations collection. -func (c *ResourceRecordSetsDeleteCall) ClientOperationId(clientOperationId string) *ResourceRecordSetsDeleteCall { +func (c *ResponsePolicyRulesDeleteCall) ClientOperationId(clientOperationId string) *ResponsePolicyRulesDeleteCall { c.urlParams_.Set("clientOperationId", clientOperationId) return c } @@ -5642,7 +9220,7 @@ func (c *ResourceRecordSetsDeleteCall) ClientOperationId(clientOperationId strin // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *ResourceRecordSetsDeleteCall) Fields(s ...googleapi.Field) *ResourceRecordSetsDeleteCall { +func (c *ResponsePolicyRulesDeleteCall) Fields(s ...googleapi.Field) *ResponsePolicyRulesDeleteCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -5650,23 +9228,23 @@ func (c *ResourceRecordSetsDeleteCall) Fields(s ...googleapi.Field) *ResourceRec // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *ResourceRecordSetsDeleteCall) Context(ctx context.Context) *ResourceRecordSetsDeleteCall { +func (c *ResponsePolicyRulesDeleteCall) Context(ctx context.Context) *ResponsePolicyRulesDeleteCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *ResourceRecordSetsDeleteCall) Header() http.Header { +func (c *ResponsePolicyRulesDeleteCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *ResourceRecordSetsDeleteCall) doRequest(alt string) (*http.Response, error) { +func (c *ResponsePolicyRulesDeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20211212") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) for k, v := range c.header_ { reqHeaders[k] = v } @@ -5674,7 +9252,7 @@ func (c *ResourceRecordSetsDeleteCall) doRequest(alt string) (*http.Response, er var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "dns/v1/projects/{project}/managedZones/{managedZone}/rrsets/{name}/{type}") + urls := googleapi.ResolveRelative(c.s.BasePath, "dns/v1/projects/{project}/responsePolicies/{responsePolicy}/rules/{responsePolicyRule}") urls += "?" 
+ c.urlParams_.Encode() req, err := http.NewRequest("DELETE", urls, body) if err != nil { @@ -5682,61 +9260,34 @@ func (c *ResourceRecordSetsDeleteCall) doRequest(alt string) (*http.Response, er } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "managedZone": c.managedZone, - "name": c.name, - "type": c.type_, + "project": c.project, + "responsePolicy": c.responsePolicy, + "responsePolicyRule": c.responsePolicyRule, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "dns.resourceRecordSets.delete" call. -// Exactly one of *ResourceRecordSetsDeleteResponse or error will be -// non-nil. Any non-2xx status code is an error. Response headers are in -// either *ResourceRecordSetsDeleteResponse.ServerResponse.Header or (if -// a response was returned at all) in error.(*googleapi.Error).Header. -// Use googleapi.IsNotModified to check whether the returned error was -// because http.StatusNotModified was returned. -func (c *ResourceRecordSetsDeleteCall) Do(opts ...googleapi.CallOption) (*ResourceRecordSetsDeleteResponse, error) { +// Do executes the "dns.responsePolicyRules.delete" call. +func (c *ResponsePolicyRulesDeleteCall) Do(opts ...googleapi.CallOption) error { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") - if res != nil && res.StatusCode == http.StatusNotModified { - if res.Body != nil { - res.Body.Close() - } - return nil, &googleapi.Error{ - Code: res.StatusCode, - Header: res.Header, - } - } if err != nil { - return nil, err + return err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err - } - ret := &ResourceRecordSetsDeleteResponse{ - ServerResponse: googleapi.ServerResponse{ - Header: res.Header, - HTTPStatusCode: res.StatusCode, - }, - } - target := &ret - if err := gensupport.DecodeResponse(target, res); err != nil { - return nil, err + return err } - return ret, nil + return nil // { - // "description": "Deletes a previously created ResourceRecordSet.", - // "flatPath": "dns/v1/projects/{project}/managedZones/{managedZone}/rrsets/{name}/{type}", + // "description": "Deletes a previously created Response Policy Rule.", + // "flatPath": "dns/v1/projects/{project}/responsePolicies/{responsePolicy}/rules/{responsePolicyRule}", // "httpMethod": "DELETE", - // "id": "dns.resourceRecordSets.delete", + // "id": "dns.responsePolicyRules.delete", // "parameterOrder": [ // "project", - // "managedZone", - // "name", - // "type" + // "responsePolicy", + // "responsePolicyRule" // ], // "parameters": { // "clientOperationId": { @@ -5744,35 +9295,26 @@ func (c *ResourceRecordSetsDeleteCall) Do(opts ...googleapi.CallOption) (*Resour // "location": "query", // "type": "string" // }, - // "managedZone": { - // "description": "Identifies the managed zone addressed by this request. 
Can be the managed zone name or ID.", - // "location": "path", - // "required": true, - // "type": "string" - // }, - // "name": { - // "description": "Fully qualified domain name.", + // "project": { + // "description": "Identifies the project addressed by this request.", // "location": "path", // "required": true, // "type": "string" // }, - // "project": { - // "description": "Identifies the project addressed by this request.", + // "responsePolicy": { + // "description": "User assigned name of the Response Policy containing the Response Policy Rule.", // "location": "path", // "required": true, // "type": "string" // }, - // "type": { - // "description": "RRSet type.", + // "responsePolicyRule": { + // "description": "User assigned name of the Response Policy Rule addressed by this request.", // "location": "path", // "required": true, // "type": "string" // } // }, - // "path": "dns/v1/projects/{project}/managedZones/{managedZone}/rrsets/{name}/{type}", - // "response": { - // "$ref": "ResourceRecordSetsDeleteResponse" - // }, + // "path": "dns/v1/projects/{project}/responsePolicies/{responsePolicy}/rules/{responsePolicyRule}", // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", // "https://www.googleapis.com/auth/ndev.clouddns.readwrite" @@ -5781,33 +9323,31 @@ func (c *ResourceRecordSetsDeleteCall) Do(opts ...googleapi.CallOption) (*Resour } -// method id "dns.resourceRecordSets.get": +// method id "dns.responsePolicyRules.get": -type ResourceRecordSetsGetCall struct { - s *Service - project string - managedZone string - name string - type_ string - urlParams_ gensupport.URLParams - ifNoneMatch_ string - ctx_ context.Context - header_ http.Header +type ResponsePolicyRulesGetCall struct { + s *Service + project string + responsePolicy string + responsePolicyRule string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header } -// Get: Fetches the representation of an existing ResourceRecordSet. +// Get: Fetches the representation of an existing Response Policy Rule. // -// - managedZone: Identifies the managed zone addressed by this request. -// Can be the managed zone name or ID. -// - name: Fully qualified domain name. // - project: Identifies the project addressed by this request. -// - type: RRSet type. -func (r *ResourceRecordSetsService) Get(project string, managedZone string, name string, type_ string) *ResourceRecordSetsGetCall { - c := &ResourceRecordSetsGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// - responsePolicy: User assigned name of the Response Policy +// containing the Response Policy Rule. +// - responsePolicyRule: User assigned name of the Response Policy Rule +// addressed by this request. +func (r *ResponsePolicyRulesService) Get(project string, responsePolicy string, responsePolicyRule string) *ResponsePolicyRulesGetCall { + c := &ResponsePolicyRulesGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.managedZone = managedZone - c.name = name - c.type_ = type_ + c.responsePolicy = responsePolicy + c.responsePolicyRule = responsePolicyRule return c } @@ -5815,7 +9355,7 @@ func (r *ResourceRecordSetsService) Get(project string, managedZone string, name // For mutating operation requests only. An optional identifier // specified by the client. Must be unique for operation resources in // the Operations collection. 
-func (c *ResourceRecordSetsGetCall) ClientOperationId(clientOperationId string) *ResourceRecordSetsGetCall { +func (c *ResponsePolicyRulesGetCall) ClientOperationId(clientOperationId string) *ResponsePolicyRulesGetCall { c.urlParams_.Set("clientOperationId", clientOperationId) return c } @@ -5823,7 +9363,7 @@ func (c *ResourceRecordSetsGetCall) ClientOperationId(clientOperationId string) // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *ResourceRecordSetsGetCall) Fields(s ...googleapi.Field) *ResourceRecordSetsGetCall { +func (c *ResponsePolicyRulesGetCall) Fields(s ...googleapi.Field) *ResponsePolicyRulesGetCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -5833,7 +9373,7 @@ func (c *ResourceRecordSetsGetCall) Fields(s ...googleapi.Field) *ResourceRecord // getting updates only after the object has changed since the last // request. Use googleapi.IsNotModified to check whether the response // error from Do is the result of In-None-Match. -func (c *ResourceRecordSetsGetCall) IfNoneMatch(entityTag string) *ResourceRecordSetsGetCall { +func (c *ResponsePolicyRulesGetCall) IfNoneMatch(entityTag string) *ResponsePolicyRulesGetCall { c.ifNoneMatch_ = entityTag return c } @@ -5841,23 +9381,23 @@ func (c *ResourceRecordSetsGetCall) IfNoneMatch(entityTag string) *ResourceRecor // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *ResourceRecordSetsGetCall) Context(ctx context.Context) *ResourceRecordSetsGetCall { +func (c *ResponsePolicyRulesGetCall) Context(ctx context.Context) *ResponsePolicyRulesGetCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *ResourceRecordSetsGetCall) Header() http.Header { +func (c *ResponsePolicyRulesGetCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *ResourceRecordSetsGetCall) doRequest(alt string) (*http.Response, error) { +func (c *ResponsePolicyRulesGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20211212") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) for k, v := range c.header_ { reqHeaders[k] = v } @@ -5868,7 +9408,7 @@ func (c *ResourceRecordSetsGetCall) doRequest(alt string) (*http.Response, error var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "dns/v1/projects/{project}/managedZones/{managedZone}/rrsets/{name}/{type}") + urls := googleapi.ResolveRelative(c.s.BasePath, "dns/v1/projects/{project}/responsePolicies/{responsePolicy}/rules/{responsePolicyRule}") urls += "?" 
+ c.urlParams_.Encode() req, err := http.NewRequest("GET", urls, body) if err != nil { @@ -5876,22 +9416,21 @@ func (c *ResourceRecordSetsGetCall) doRequest(alt string) (*http.Response, error } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "managedZone": c.managedZone, - "name": c.name, - "type": c.type_, + "project": c.project, + "responsePolicy": c.responsePolicy, + "responsePolicyRule": c.responsePolicyRule, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "dns.resourceRecordSets.get" call. -// Exactly one of *ResourceRecordSet or error will be non-nil. Any +// Do executes the "dns.responsePolicyRules.get" call. +// Exactly one of *ResponsePolicyRule or error will be non-nil. Any // non-2xx status code is an error. Response headers are in either -// *ResourceRecordSet.ServerResponse.Header or (if a response was +// *ResponsePolicyRule.ServerResponse.Header or (if a response was // returned at all) in error.(*googleapi.Error).Header. Use // googleapi.IsNotModified to check whether the returned error was // because http.StatusNotModified was returned. -func (c *ResourceRecordSetsGetCall) Do(opts ...googleapi.CallOption) (*ResourceRecordSet, error) { +func (c *ResponsePolicyRulesGetCall) Do(opts ...googleapi.CallOption) (*ResponsePolicyRule, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -5910,7 +9449,7 @@ func (c *ResourceRecordSetsGetCall) Do(opts ...googleapi.CallOption) (*ResourceR if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &ResourceRecordSet{ + ret := &ResponsePolicyRule{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -5922,15 +9461,14 @@ func (c *ResourceRecordSetsGetCall) Do(opts ...googleapi.CallOption) (*ResourceR } return ret, nil // { - // "description": "Fetches the representation of an existing ResourceRecordSet.", - // "flatPath": "dns/v1/projects/{project}/managedZones/{managedZone}/rrsets/{name}/{type}", + // "description": "Fetches the representation of an existing Response Policy Rule.", + // "flatPath": "dns/v1/projects/{project}/responsePolicies/{responsePolicy}/rules/{responsePolicyRule}", // "httpMethod": "GET", - // "id": "dns.resourceRecordSets.get", + // "id": "dns.responsePolicyRules.get", // "parameterOrder": [ // "project", - // "managedZone", - // "name", - // "type" + // "responsePolicy", + // "responsePolicyRule" // ], // "parameters": { // "clientOperationId": { @@ -5938,34 +9476,28 @@ func (c *ResourceRecordSetsGetCall) Do(opts ...googleapi.CallOption) (*ResourceR // "location": "query", // "type": "string" // }, - // "managedZone": { - // "description": "Identifies the managed zone addressed by this request. 
Can be the managed zone name or ID.", - // "location": "path", - // "required": true, - // "type": "string" - // }, - // "name": { - // "description": "Fully qualified domain name.", + // "project": { + // "description": "Identifies the project addressed by this request.", // "location": "path", // "required": true, // "type": "string" // }, - // "project": { - // "description": "Identifies the project addressed by this request.", + // "responsePolicy": { + // "description": "User assigned name of the Response Policy containing the Response Policy Rule.", // "location": "path", // "required": true, // "type": "string" // }, - // "type": { - // "description": "RRSet type.", + // "responsePolicyRule": { + // "description": "User assigned name of the Response Policy Rule addressed by this request.", // "location": "path", // "required": true, // "type": "string" // } // }, - // "path": "dns/v1/projects/{project}/managedZones/{managedZone}/rrsets/{name}/{type}", + // "path": "dns/v1/projects/{project}/responsePolicies/{responsePolicy}/rules/{responsePolicyRule}", // "response": { - // "$ref": "ResourceRecordSet" + // "$ref": "ResponsePolicyRule" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", @@ -5974,69 +9506,52 @@ func (c *ResourceRecordSetsGetCall) Do(opts ...googleapi.CallOption) (*ResourceR // "https://www.googleapis.com/auth/ndev.clouddns.readwrite" // ] // } - -} - -// method id "dns.resourceRecordSets.list": - -type ResourceRecordSetsListCall struct { - s *Service - project string - managedZone string - urlParams_ gensupport.URLParams - ifNoneMatch_ string - ctx_ context.Context - header_ http.Header + } -// List: Enumerates ResourceRecordSets that you have created but not yet -// deleted. +// method id "dns.responsePolicyRules.list": + +type ResponsePolicyRulesListCall struct { + s *Service + project string + responsePolicy string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header +} + +// List: Enumerates all Response Policy Rules associated with a project. // -// - managedZone: Identifies the managed zone addressed by this request. -// Can be the managed zone name or ID. // - project: Identifies the project addressed by this request. -func (r *ResourceRecordSetsService) List(project string, managedZone string) *ResourceRecordSetsListCall { - c := &ResourceRecordSetsListCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// - responsePolicy: User assigned name of the Response Policy to list. +func (r *ResponsePolicyRulesService) List(project string, responsePolicy string) *ResponsePolicyRulesListCall { + c := &ResponsePolicyRulesListCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.managedZone = managedZone + c.responsePolicy = responsePolicy return c } // MaxResults sets the optional parameter "maxResults": Maximum number // of results to be returned. If unspecified, the server decides how // many results to return. -func (c *ResourceRecordSetsListCall) MaxResults(maxResults int64) *ResourceRecordSetsListCall { +func (c *ResponsePolicyRulesListCall) MaxResults(maxResults int64) *ResponsePolicyRulesListCall { c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) return c } -// Name sets the optional parameter "name": Restricts the list to return -// only records with this fully qualified domain name. 
-func (c *ResourceRecordSetsListCall) Name(name string) *ResourceRecordSetsListCall { - c.urlParams_.Set("name", name) - return c -} - // PageToken sets the optional parameter "pageToken": A tag returned by // a previous list request that was truncated. Use this parameter to // continue a previous list request. -func (c *ResourceRecordSetsListCall) PageToken(pageToken string) *ResourceRecordSetsListCall { +func (c *ResponsePolicyRulesListCall) PageToken(pageToken string) *ResponsePolicyRulesListCall { c.urlParams_.Set("pageToken", pageToken) return c } -// Type sets the optional parameter "type": Restricts the list to return -// only records of this type. If present, the "name" parameter must also -// be present. -func (c *ResourceRecordSetsListCall) Type(type_ string) *ResourceRecordSetsListCall { - c.urlParams_.Set("type", type_) - return c -} - // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *ResourceRecordSetsListCall) Fields(s ...googleapi.Field) *ResourceRecordSetsListCall { +func (c *ResponsePolicyRulesListCall) Fields(s ...googleapi.Field) *ResponsePolicyRulesListCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -6046,7 +9561,7 @@ func (c *ResourceRecordSetsListCall) Fields(s ...googleapi.Field) *ResourceRecor // getting updates only after the object has changed since the last // request. Use googleapi.IsNotModified to check whether the response // error from Do is the result of In-None-Match. -func (c *ResourceRecordSetsListCall) IfNoneMatch(entityTag string) *ResourceRecordSetsListCall { +func (c *ResponsePolicyRulesListCall) IfNoneMatch(entityTag string) *ResponsePolicyRulesListCall { c.ifNoneMatch_ = entityTag return c } @@ -6054,23 +9569,23 @@ func (c *ResourceRecordSetsListCall) IfNoneMatch(entityTag string) *ResourceReco // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *ResourceRecordSetsListCall) Context(ctx context.Context) *ResourceRecordSetsListCall { +func (c *ResponsePolicyRulesListCall) Context(ctx context.Context) *ResponsePolicyRulesListCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *ResourceRecordSetsListCall) Header() http.Header { +func (c *ResponsePolicyRulesListCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *ResourceRecordSetsListCall) doRequest(alt string) (*http.Response, error) { +func (c *ResponsePolicyRulesListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20211212") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) for k, v := range c.header_ { reqHeaders[k] = v } @@ -6081,7 +9596,7 @@ func (c *ResourceRecordSetsListCall) doRequest(alt string) (*http.Response, erro var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "dns/v1/projects/{project}/managedZones/{managedZone}/rrsets") + urls := googleapi.ResolveRelative(c.s.BasePath, "dns/v1/projects/{project}/responsePolicies/{responsePolicy}/rules") urls += "?" 
+ c.urlParams_.Encode() req, err := http.NewRequest("GET", urls, body) if err != nil { @@ -6089,20 +9604,20 @@ func (c *ResourceRecordSetsListCall) doRequest(alt string) (*http.Response, erro } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "managedZone": c.managedZone, + "project": c.project, + "responsePolicy": c.responsePolicy, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "dns.resourceRecordSets.list" call. -// Exactly one of *ResourceRecordSetsListResponse or error will be +// Do executes the "dns.responsePolicyRules.list" call. +// Exactly one of *ResponsePolicyRulesListResponse or error will be // non-nil. Any non-2xx status code is an error. Response headers are in -// either *ResourceRecordSetsListResponse.ServerResponse.Header or (if a -// response was returned at all) in error.(*googleapi.Error).Header. Use -// googleapi.IsNotModified to check whether the returned error was +// either *ResponsePolicyRulesListResponse.ServerResponse.Header or (if +// a response was returned at all) in error.(*googleapi.Error).Header. +// Use googleapi.IsNotModified to check whether the returned error was // because http.StatusNotModified was returned. -func (c *ResourceRecordSetsListCall) Do(opts ...googleapi.CallOption) (*ResourceRecordSetsListResponse, error) { +func (c *ResponsePolicyRulesListCall) Do(opts ...googleapi.CallOption) (*ResponsePolicyRulesListResponse, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -6121,7 +9636,7 @@ func (c *ResourceRecordSetsListCall) Do(opts ...googleapi.CallOption) (*Resource if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &ResourceRecordSetsListResponse{ + ret := &ResponsePolicyRulesListResponse{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -6133,32 +9648,21 @@ func (c *ResourceRecordSetsListCall) Do(opts ...googleapi.CallOption) (*Resource } return ret, nil // { - // "description": "Enumerates ResourceRecordSets that you have created but not yet deleted.", - // "flatPath": "dns/v1/projects/{project}/managedZones/{managedZone}/rrsets", + // "description": "Enumerates all Response Policy Rules associated with a project.", + // "flatPath": "dns/v1/projects/{project}/responsePolicies/{responsePolicy}/rules", // "httpMethod": "GET", - // "id": "dns.resourceRecordSets.list", + // "id": "dns.responsePolicyRules.list", // "parameterOrder": [ // "project", - // "managedZone" + // "responsePolicy" // ], // "parameters": { - // "managedZone": { - // "description": "Identifies the managed zone addressed by this request. Can be the managed zone name or ID.", - // "location": "path", - // "required": true, - // "type": "string" - // }, // "maxResults": { // "description": "Optional. Maximum number of results to be returned. If unspecified, the server decides how many results to return.", // "format": "int32", // "location": "query", // "type": "integer" // }, - // "name": { - // "description": "Restricts the list to return only records with this fully qualified domain name.", - // "location": "query", - // "type": "string" - // }, // "pageToken": { // "description": "Optional. A tag returned by a previous list request that was truncated. 
Use this parameter to continue a previous list request.", // "location": "query", @@ -6170,15 +9674,16 @@ func (c *ResourceRecordSetsListCall) Do(opts ...googleapi.CallOption) (*Resource // "required": true, // "type": "string" // }, - // "type": { - // "description": "Restricts the list to return only records of this type. If present, the \"name\" parameter must also be present.", - // "location": "query", + // "responsePolicy": { + // "description": "User assigned name of the Response Policy to list.", + // "location": "path", + // "required": true, // "type": "string" // } // }, - // "path": "dns/v1/projects/{project}/managedZones/{managedZone}/rrsets", + // "path": "dns/v1/projects/{project}/responsePolicies/{responsePolicy}/rules", // "response": { - // "$ref": "ResourceRecordSetsListResponse" + // "$ref": "ResponsePolicyRulesListResponse" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", @@ -6193,7 +9698,7 @@ func (c *ResourceRecordSetsListCall) Do(opts ...googleapi.CallOption) (*Resource // Pages invokes f for each page of results. // A non-nil error returned from f will halt the iteration. // The provided context supersedes any context provided to the Context method. -func (c *ResourceRecordSetsListCall) Pages(ctx context.Context, f func(*ResourceRecordSetsListResponse) error) error { +func (c *ResponsePolicyRulesListCall) Pages(ctx context.Context, f func(*ResponsePolicyRulesListResponse) error) error { c.ctx_ = ctx defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point for { @@ -6211,34 +9716,32 @@ func (c *ResourceRecordSetsListCall) Pages(ctx context.Context, f func(*Resource } } -// method id "dns.resourceRecordSets.patch": +// method id "dns.responsePolicyRules.patch": -type ResourceRecordSetsPatchCall struct { - s *Service - project string - managedZone string - name string - type_ string - resourcerecordset *ResourceRecordSet - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type ResponsePolicyRulesPatchCall struct { + s *Service + project string + responsePolicy string + responsePolicyRule string + responsepolicyrule *ResponsePolicyRule + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// Patch: Applies a partial update to an existing ResourceRecordSet. +// Patch: Applies a partial update to an existing Response Policy Rule. // -// - managedZone: Identifies the managed zone addressed by this request. -// Can be the managed zone name or ID. -// - name: Fully qualified domain name. // - project: Identifies the project addressed by this request. -// - type: RRSet type. -func (r *ResourceRecordSetsService) Patch(project string, managedZone string, name string, type_ string, resourcerecordset *ResourceRecordSet) *ResourceRecordSetsPatchCall { - c := &ResourceRecordSetsPatchCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// - responsePolicy: User assigned name of the Response Policy +// containing the Response Policy Rule. +// - responsePolicyRule: User assigned name of the Response Policy Rule +// addressed by this request. 
+func (r *ResponsePolicyRulesService) Patch(project string, responsePolicy string, responsePolicyRule string, responsepolicyrule *ResponsePolicyRule) *ResponsePolicyRulesPatchCall { + c := &ResponsePolicyRulesPatchCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.managedZone = managedZone - c.name = name - c.type_ = type_ - c.resourcerecordset = resourcerecordset + c.responsePolicy = responsePolicy + c.responsePolicyRule = responsePolicyRule + c.responsepolicyrule = responsepolicyrule return c } @@ -6246,7 +9749,7 @@ func (r *ResourceRecordSetsService) Patch(project string, managedZone string, na // For mutating operation requests only. An optional identifier // specified by the client. Must be unique for operation resources in // the Operations collection. -func (c *ResourceRecordSetsPatchCall) ClientOperationId(clientOperationId string) *ResourceRecordSetsPatchCall { +func (c *ResponsePolicyRulesPatchCall) ClientOperationId(clientOperationId string) *ResponsePolicyRulesPatchCall { c.urlParams_.Set("clientOperationId", clientOperationId) return c } @@ -6254,7 +9757,7 @@ func (c *ResourceRecordSetsPatchCall) ClientOperationId(clientOperationId string // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *ResourceRecordSetsPatchCall) Fields(s ...googleapi.Field) *ResourceRecordSetsPatchCall { +func (c *ResponsePolicyRulesPatchCall) Fields(s ...googleapi.Field) *ResponsePolicyRulesPatchCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -6262,36 +9765,36 @@ func (c *ResourceRecordSetsPatchCall) Fields(s ...googleapi.Field) *ResourceReco // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *ResourceRecordSetsPatchCall) Context(ctx context.Context) *ResourceRecordSetsPatchCall { +func (c *ResponsePolicyRulesPatchCall) Context(ctx context.Context) *ResponsePolicyRulesPatchCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *ResourceRecordSetsPatchCall) Header() http.Header { +func (c *ResponsePolicyRulesPatchCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *ResourceRecordSetsPatchCall) doRequest(alt string) (*http.Response, error) { +func (c *ResponsePolicyRulesPatchCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20211212") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.resourcerecordset) + body, err := googleapi.WithoutDataWrapper.JSONReader(c.responsepolicyrule) if err != nil { return nil, err } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "dns/v1/projects/{project}/managedZones/{managedZone}/rrsets/{name}/{type}") + urls := googleapi.ResolveRelative(c.s.BasePath, "dns/v1/projects/{project}/responsePolicies/{responsePolicy}/rules/{responsePolicyRule}") urls += "?" 
+ c.urlParams_.Encode() req, err := http.NewRequest("PATCH", urls, body) if err != nil { @@ -6299,22 +9802,21 @@ func (c *ResourceRecordSetsPatchCall) doRequest(alt string) (*http.Response, err } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "managedZone": c.managedZone, - "name": c.name, - "type": c.type_, + "project": c.project, + "responsePolicy": c.responsePolicy, + "responsePolicyRule": c.responsePolicyRule, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "dns.resourceRecordSets.patch" call. -// Exactly one of *ResourceRecordSet or error will be non-nil. Any -// non-2xx status code is an error. Response headers are in either -// *ResourceRecordSet.ServerResponse.Header or (if a response was -// returned at all) in error.(*googleapi.Error).Header. Use -// googleapi.IsNotModified to check whether the returned error was +// Do executes the "dns.responsePolicyRules.patch" call. +// Exactly one of *ResponsePolicyRulesPatchResponse or error will be +// non-nil. Any non-2xx status code is an error. Response headers are in +// either *ResponsePolicyRulesPatchResponse.ServerResponse.Header or (if +// a response was returned at all) in error.(*googleapi.Error).Header. +// Use googleapi.IsNotModified to check whether the returned error was // because http.StatusNotModified was returned. -func (c *ResourceRecordSetsPatchCall) Do(opts ...googleapi.CallOption) (*ResourceRecordSet, error) { +func (c *ResponsePolicyRulesPatchCall) Do(opts ...googleapi.CallOption) (*ResponsePolicyRulesPatchResponse, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -6333,7 +9835,7 @@ func (c *ResourceRecordSetsPatchCall) Do(opts ...googleapi.CallOption) (*Resourc if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &ResourceRecordSet{ + ret := &ResponsePolicyRulesPatchResponse{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -6345,15 +9847,14 @@ func (c *ResourceRecordSetsPatchCall) Do(opts ...googleapi.CallOption) (*Resourc } return ret, nil // { - // "description": "Applies a partial update to an existing ResourceRecordSet.", - // "flatPath": "dns/v1/projects/{project}/managedZones/{managedZone}/rrsets/{name}/{type}", + // "description": "Applies a partial update to an existing Response Policy Rule.", + // "flatPath": "dns/v1/projects/{project}/responsePolicies/{responsePolicy}/rules/{responsePolicyRule}", // "httpMethod": "PATCH", - // "id": "dns.resourceRecordSets.patch", + // "id": "dns.responsePolicyRules.patch", // "parameterOrder": [ // "project", - // "managedZone", - // "name", - // "type" + // "responsePolicy", + // "responsePolicyRule" // ], // "parameters": { // "clientOperationId": { @@ -6361,17 +9862,186 @@ func (c *ResourceRecordSetsPatchCall) Do(opts ...googleapi.CallOption) (*Resourc // "location": "query", // "type": "string" // }, - // "managedZone": { - // "description": "Identifies the managed zone addressed by this request. 
Can be the managed zone name or ID.", + // "project": { + // "description": "Identifies the project addressed by this request.", // "location": "path", // "required": true, // "type": "string" // }, - // "name": { - // "description": "Fully qualified domain name.", + // "responsePolicy": { + // "description": "User assigned name of the Response Policy containing the Response Policy Rule.", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "responsePolicyRule": { + // "description": "User assigned name of the Response Policy Rule addressed by this request.", // "location": "path", // "required": true, // "type": "string" + // } + // }, + // "path": "dns/v1/projects/{project}/responsePolicies/{responsePolicy}/rules/{responsePolicyRule}", + // "request": { + // "$ref": "ResponsePolicyRule" + // }, + // "response": { + // "$ref": "ResponsePolicyRulesPatchResponse" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/ndev.clouddns.readwrite" + // ] + // } + +} + +// method id "dns.responsePolicyRules.update": + +type ResponsePolicyRulesUpdateCall struct { + s *Service + project string + responsePolicy string + responsePolicyRule string + responsepolicyrule *ResponsePolicyRule + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// Update: Updates an existing Response Policy Rule. +// +// - project: Identifies the project addressed by this request. +// - responsePolicy: User assigned name of the Response Policy +// containing the Response Policy Rule. +// - responsePolicyRule: User assigned name of the Response Policy Rule +// addressed by this request. +func (r *ResponsePolicyRulesService) Update(project string, responsePolicy string, responsePolicyRule string, responsepolicyrule *ResponsePolicyRule) *ResponsePolicyRulesUpdateCall { + c := &ResponsePolicyRulesUpdateCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.responsePolicy = responsePolicy + c.responsePolicyRule = responsePolicyRule + c.responsepolicyrule = responsepolicyrule + return c +} + +// ClientOperationId sets the optional parameter "clientOperationId": +// For mutating operation requests only. An optional identifier +// specified by the client. Must be unique for operation resources in +// the Operations collection. +func (c *ResponsePolicyRulesUpdateCall) ClientOperationId(clientOperationId string) *ResponsePolicyRulesUpdateCall { + c.urlParams_.Set("clientOperationId", clientOperationId) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *ResponsePolicyRulesUpdateCall) Fields(s ...googleapi.Field) *ResponsePolicyRulesUpdateCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *ResponsePolicyRulesUpdateCall) Context(ctx context.Context) *ResponsePolicyRulesUpdateCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. 
+func (c *ResponsePolicyRulesUpdateCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *ResponsePolicyRulesUpdateCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.responsepolicyrule) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "dns/v1/projects/{project}/responsePolicies/{responsePolicy}/rules/{responsePolicyRule}") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("PUT", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + "responsePolicy": c.responsePolicy, + "responsePolicyRule": c.responsePolicyRule, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "dns.responsePolicyRules.update" call. +// Exactly one of *ResponsePolicyRulesUpdateResponse or error will be +// non-nil. Any non-2xx status code is an error. Response headers are in +// either *ResponsePolicyRulesUpdateResponse.ServerResponse.Header or +// (if a response was returned at all) in +// error.(*googleapi.Error).Header. Use googleapi.IsNotModified to check +// whether the returned error was because http.StatusNotModified was +// returned. +func (c *ResponsePolicyRulesUpdateCall) Do(opts ...googleapi.CallOption) (*ResponsePolicyRulesUpdateResponse, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &ResponsePolicyRulesUpdateResponse{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Updates an existing Response Policy Rule.", + // "flatPath": "dns/v1/projects/{project}/responsePolicies/{responsePolicy}/rules/{responsePolicyRule}", + // "httpMethod": "PUT", + // "id": "dns.responsePolicyRules.update", + // "parameterOrder": [ + // "project", + // "responsePolicy", + // "responsePolicyRule" + // ], + // "parameters": { + // "clientOperationId": { + // "description": "For mutating operation requests only. An optional identifier specified by the client. 
Must be unique for operation resources in the Operations collection.", + // "location": "query", + // "type": "string" // }, // "project": { // "description": "Identifies the project addressed by this request.", @@ -6379,19 +10049,25 @@ func (c *ResourceRecordSetsPatchCall) Do(opts ...googleapi.CallOption) (*Resourc // "required": true, // "type": "string" // }, - // "type": { - // "description": "RRSet type.", + // "responsePolicy": { + // "description": "User assigned name of the Response Policy containing the Response Policy Rule.", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "responsePolicyRule": { + // "description": "User assigned name of the Response Policy Rule addressed by this request.", // "location": "path", // "required": true, // "type": "string" // } // }, - // "path": "dns/v1/projects/{project}/managedZones/{managedZone}/rrsets/{name}/{type}", + // "path": "dns/v1/projects/{project}/responsePolicies/{responsePolicy}/rules/{responsePolicyRule}", // "request": { - // "$ref": "ResourceRecordSet" + // "$ref": "ResponsePolicyRule" // }, // "response": { - // "$ref": "ResourceRecordSet" + // "$ref": "ResponsePolicyRulesUpdateResponse" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", diff --git a/vendor/google.golang.org/api/googleapi/googleapi.go b/vendor/google.golang.org/api/googleapi/googleapi.go index 1a79e6d53..2d3e00edc 100644 --- a/vendor/google.golang.org/api/googleapi/googleapi.go +++ b/vendor/google.golang.org/api/googleapi/googleapi.go @@ -15,6 +15,7 @@ import ( "net/http" "net/url" "strings" + "time" "google.golang.org/api/internal/third_party/uritemplates" ) @@ -140,6 +141,7 @@ func CheckResponse(res *http.Response) error { jerr.Error.Code = res.StatusCode } jerr.Error.Body = string(slurp) + jerr.Error.Header = res.Header return jerr.Error } } @@ -245,12 +247,30 @@ func ChunkSize(size int) MediaOption { return chunkSizeOption(size) } +type chunkRetryDeadlineOption time.Duration + +func (cd chunkRetryDeadlineOption) setOptions(o *MediaOptions) { + o.ChunkRetryDeadline = time.Duration(cd) +} + +// ChunkRetryDeadline returns a MediaOption which sets a per-chunk retry +// deadline. If a single chunk has been attempting to upload for longer than +// this time and the request fails, it will no longer be retried, and the error +// will be returned to the caller. +// This is only applicable for files which are large enough to require +// a multi-chunk resumable upload. +// The default value is 32s. +// To set a deadline on the entire upload, use context timeout or cancellation. +func ChunkRetryDeadline(deadline time.Duration) MediaOption { + return chunkRetryDeadlineOption(deadline) +} + // MediaOptions stores options for customizing media upload. It is not used by developers directly. type MediaOptions struct { ContentType string ForceEmptyContentType bool - - ChunkSize int + ChunkSize int + ChunkRetryDeadline time.Duration } // ProcessMediaOptions stores options from opts in a MediaOptions. diff --git a/vendor/google.golang.org/api/internal/gensupport/media.go b/vendor/google.golang.org/api/internal/gensupport/media.go index 0460ab594..d14a22470 100644 --- a/vendor/google.golang.org/api/internal/gensupport/media.go +++ b/vendor/google.golang.org/api/internal/gensupport/media.go @@ -15,6 +15,7 @@ import ( "net/textproto" "strings" "sync" + "time" "google.golang.org/api/googleapi" ) @@ -217,12 +218,13 @@ func PrepareUpload(media io.Reader, chunkSize int) (r io.Reader, mb *MediaBuffer // code only. 
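// Illustrative sketch (assumed example, not part of the vendored file): passing the new
// googleapi.ChunkRetryDeadline option from googleapi.go above to a generated upload call,
// together with ChunkSize. The Cloud Storage objects.insert call is only an assumed
// stand-in for any generated method whose Media method takes googleapi.MediaOption values.
package example

import (
	"os"
	"time"

	"google.golang.org/api/googleapi"
	storage "google.golang.org/api/storage/v1"
)

func upload(svc *storage.Service, f *os.File) error {
	_, err := svc.Objects.
		Insert("example-bucket", &storage.Object{Name: "backup.tar"}).
		Media(f,
			// Large chunk size so the upload is resumable and split into chunks;
			// the per-chunk deadline only applies to multi-chunk uploads.
			googleapi.ChunkSize(16*1024*1024),
			// Allow each chunk up to 90s of retries instead of the default 32s.
			googleapi.ChunkRetryDeadline(90*time.Second),
		).
		Do()
	return err
}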
type MediaInfo struct { // At most one of Media and MediaBuffer will be set. - media io.Reader - buffer *MediaBuffer - singleChunk bool - mType string - size int64 // mediaSize, if known. Used only for calls to progressUpdater_. - progressUpdater googleapi.ProgressUpdater + media io.Reader + buffer *MediaBuffer + singleChunk bool + mType string + size int64 // mediaSize, if known. Used only for calls to progressUpdater_. + progressUpdater googleapi.ProgressUpdater + chunkRetryDeadline time.Duration } // NewInfoFromMedia should be invoked from the Media method of a call. It returns a @@ -234,6 +236,7 @@ func NewInfoFromMedia(r io.Reader, options []googleapi.MediaOption) *MediaInfo { if !opts.ForceEmptyContentType { r, mi.mType = DetermineContentType(r, opts.ContentType) } + mi.chunkRetryDeadline = opts.ChunkRetryDeadline mi.media, mi.buffer, mi.singleChunk = PrepareUpload(r, opts.ChunkSize) return mi } @@ -356,6 +359,7 @@ func (mi *MediaInfo) ResumableUpload(locURI string) *ResumableUpload { mi.progressUpdater(curr, mi.size) } }, + ChunkRetryDeadline: mi.chunkRetryDeadline, } } diff --git a/vendor/google.golang.org/api/internal/gensupport/resumable.go b/vendor/google.golang.org/api/internal/gensupport/resumable.go index 0fb74606c..0c659188d 100644 --- a/vendor/google.golang.org/api/internal/gensupport/resumable.go +++ b/vendor/google.golang.org/api/internal/gensupport/resumable.go @@ -10,8 +10,12 @@ import ( "fmt" "io" "net/http" + "strings" "sync" "time" + + "github.com/google/uuid" + "google.golang.org/api/internal" ) // ResumableUpload is used by the generated APIs to provide resumable uploads. @@ -34,6 +38,15 @@ type ResumableUpload struct { // Retry optionally configures retries for requests made against the upload. Retry *RetryConfig + + // ChunkRetryDeadline configures the per-chunk deadline after which no further + // retries should happen. + ChunkRetryDeadline time.Duration + + // Track current request invocation ID and attempt count for retry metric + // headers. + invocationID string + attempts int } // Progress returns the number of bytes uploaded at this point. @@ -68,6 +81,10 @@ func (rx *ResumableUpload) doUploadRequest(ctx context.Context, data io.Reader, req.Header.Set("Content-Type", rx.MediaType) req.Header.Set("User-Agent", rx.UserAgent) + baseXGoogHeader := "gl-go/" + GoVersion() + " gdcl/" + internal.Version + invocationHeader := fmt.Sprintf("gccl-invocation-id/%s gccl-attempt-count/%d", rx.invocationID, rx.attempts) + req.Header.Set("X-Goog-Api-Client", strings.Join([]string{baseXGoogHeader, invocationHeader}, " ")) + // Google's upload endpoint uses status code 308 for a // different purpose than the "308 Permanent Redirect" // since-standardized in RFC 7238. Because of the conflict in @@ -151,18 +168,34 @@ func (rx *ResumableUpload) Upload(ctx context.Context) (resp *http.Response, err } return nil, err } + // This case is very unlikely but possible only if rx.ChunkRetryDeadline is + // set to a very small value, in which case no requests will be sent before + // the deadline. Return an error to avoid causing a panic. + if resp == nil { + return nil, fmt.Errorf("upload request to %v not sent, choose larger value for ChunkRetryDealine", rx.URI) + } return resp, nil } // Configure retryable error criteria. errorFunc := rx.Retry.errorFunc() + // Configure per-chunk retry deadline. + var retryDeadline time.Duration + if rx.ChunkRetryDeadline != 0 { + retryDeadline = rx.ChunkRetryDeadline + } else { + retryDeadline = defaultRetryDeadline + } + // Send all chunks. 
for { var pause time.Duration - // Each chunk gets its own initialized-at-zero backoff. + // Each chunk gets its own initialized-at-zero backoff and invocation ID. bo := rx.Retry.backoff() quitAfter := time.After(retryDeadline) + rx.attempts = 1 + rx.invocationID = uuid.New().String() // Retry loop for a single chunk. for { @@ -177,6 +210,22 @@ func (rx *ResumableUpload) Upload(ctx context.Context) (resp *http.Response, err return prepareReturn(resp, err) } + // Check for context cancellation or timeout once more. If more than one + // case in the select statement above was satisfied at the same time, Go + // will choose one arbitrarily. + // That can cause an operation to go through even if the context was + // canceled before or the timeout was reached. + select { + case <-ctx.Done(): + if err == nil { + err = ctx.Err() + } + return prepareReturn(resp, err) + case <-quitAfter: + return prepareReturn(resp, err) + default: + } + resp, err = rx.transferChunk(ctx) var status int @@ -189,6 +238,7 @@ func (rx *ResumableUpload) Upload(ctx context.Context) (resp *http.Response, err break } + rx.attempts++ pause = bo.Pause() if resp != nil && resp.Body != nil { resp.Body.Close() diff --git a/vendor/google.golang.org/api/internal/gensupport/retry.go b/vendor/google.golang.org/api/internal/gensupport/retry.go index 4a4861b1b..20b57d925 100644 --- a/vendor/google.golang.org/api/internal/gensupport/retry.go +++ b/vendor/google.golang.org/api/internal/gensupport/retry.go @@ -5,7 +5,10 @@ package gensupport import ( + "errors" "io" + "net" + "strings" "time" "github.com/googleapis/gax-go/v2" @@ -20,8 +23,8 @@ type Backoff interface { // These are declared as global variables so that tests can overwrite them. var ( - // Per-chunk deadline for resumable uploads. - retryDeadline = 32 * time.Second + // Default per-chunk deadline for resumable uploads. + defaultRetryDeadline = 32 * time.Second // Default backoff timer. backoff = func() Backoff { return &gax.Backoff{Initial: 100 * time.Millisecond} @@ -36,6 +39,10 @@ const ( // should be retried. // https://cloud.google.com/storage/docs/json_api/v1/status-codes#standardcodes statusTooManyRequests = 429 + + // statusRequestTimeout is returned by the storage API if the + // upload connection was broken. The request should be retried. + statusRequestTimeout = 408 ) // shouldRetry indicates whether an error is retryable for the purposes of this @@ -46,7 +53,7 @@ func shouldRetry(status int, err error) bool { if 500 <= status && status <= 599 { return true } - if status == statusTooManyRequests { + if status == statusTooManyRequests || status == statusRequestTimeout { return true } if err == io.ErrUnexpectedEOF { @@ -61,6 +68,14 @@ func shouldRetry(status int, err error) bool { return true } } + var opErr *net.OpError + if errors.As(err, &opErr) { + if strings.Contains(opErr.Error(), "use of closed network connection") { + // TODO: check against net.ErrClosed (go 1.16+) instead of string + return true + } + } + // If Go 1.13 error unwrapping is available, use this to examine wrapped // errors. 
if err, ok := err.(interface{ Unwrap() error }); ok { diff --git a/vendor/google.golang.org/api/internal/gensupport/send.go b/vendor/google.golang.org/api/internal/gensupport/send.go index dab64aef3..70a8e01c1 100644 --- a/vendor/google.golang.org/api/internal/gensupport/send.go +++ b/vendor/google.golang.org/api/internal/gensupport/send.go @@ -8,9 +8,12 @@ import ( "context" "encoding/json" "errors" + "fmt" "net/http" + "strings" "time" + "github.com/google/uuid" "github.com/googleapis/gax-go/v2" ) @@ -71,6 +74,9 @@ func sendAndRetry(ctx context.Context, client *http.Client, req *http.Request, r var resp *http.Response var err error + attempts := 1 + invocationID := uuid.New().String() + baseXGoogHeader := req.Header.Get("X-Goog-Api-Client") // Loop to retry the request, up to the context deadline. var pause time.Duration @@ -99,6 +105,20 @@ func sendAndRetry(ctx context.Context, client *http.Client, req *http.Request, r case <-time.After(pause): } + if ctx.Err() != nil { + // Check for context cancellation once more. If more than one case in a + // select is satisfied at the same time, Go will choose one arbitrarily. + // That can cause an operation to go through even if the context was + // canceled before. + if err == nil { + err = ctx.Err() + } + return resp, err + } + invocationHeader := fmt.Sprintf("gccl-invocation-id/%s gccl-attempt-count/%d", invocationID, attempts) + xGoogHeader := strings.Join([]string{invocationHeader, baseXGoogHeader}, " ") + req.Header.Set("X-Goog-Api-Client", xGoogHeader) + resp, err = client.Do(req.WithContext(ctx)) var status int @@ -112,6 +132,7 @@ func sendAndRetry(ctx context.Context, client *http.Client, req *http.Request, r if req.GetBody == nil || !errorFunc(status, err) { break } + attempts++ var errBody error req.Body, errBody = req.GetBody() if errBody != nil { diff --git a/vendor/google.golang.org/api/internal/version.go b/vendor/google.golang.org/api/internal/version.go new file mode 100644 index 000000000..a0ec7adb4 --- /dev/null +++ b/vendor/google.golang.org/api/internal/version.go @@ -0,0 +1,8 @@ +// Copyright 2022 Google LLC. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package internal + +// Version is the current tagged release of the library. +const Version = "0.88.0" diff --git a/vendor/google.golang.org/api/option/credentials_go19.go b/vendor/google.golang.org/api/option/credentials_go19.go deleted file mode 100644 index 2e3d2dedb..000000000 --- a/vendor/google.golang.org/api/option/credentials_go19.go +++ /dev/null @@ -1,24 +0,0 @@ -// Copyright 2018 Google LLC. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -//go:build go1.9 -// +build go1.9 - -package option - -import ( - "golang.org/x/oauth2/google" - "google.golang.org/api/internal" -) - -type withCreds google.Credentials - -func (w *withCreds) Apply(o *internal.DialSettings) { - o.Credentials = (*google.Credentials)(w) -} - -// WithCredentials returns a ClientOption that authenticates API calls. -func WithCredentials(creds *google.Credentials) ClientOption { - return (*withCreds)(creds) -} diff --git a/vendor/google.golang.org/api/option/credentials_notgo19.go b/vendor/google.golang.org/api/option/credentials_notgo19.go deleted file mode 100644 index 728a62f1a..000000000 --- a/vendor/google.golang.org/api/option/credentials_notgo19.go +++ /dev/null @@ -1,23 +0,0 @@ -// Copyright 2018 Google LLC. 
-// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -//go:build !go1.9 -// +build !go1.9 - -package option - -import ( - "golang.org/x/oauth2/google" - "google.golang.org/api/internal" -) - -type withCreds google.DefaultCredentials - -func (w *withCreds) Apply(o *internal.DialSettings) { - o.Credentials = (*google.DefaultCredentials)(w) -} - -func WithCredentials(creds *google.DefaultCredentials) ClientOption { - return (*withCreds)(creds) -} diff --git a/vendor/google.golang.org/api/option/option.go b/vendor/google.golang.org/api/option/option.go index 9ff697e0b..60743c63e 100644 --- a/vendor/google.golang.org/api/option/option.go +++ b/vendor/google.golang.org/api/option/option.go @@ -10,6 +10,7 @@ import ( "net/http" "golang.org/x/oauth2" + "golang.org/x/oauth2/google" "google.golang.org/api/internal" "google.golang.org/api/internal/impersonate" "google.golang.org/grpc" @@ -144,8 +145,6 @@ func (w withGRPCDialOption) Apply(o *internal.DialSettings) { // WithGRPCConnectionPool returns a ClientOption that creates a pool of gRPC // connections that requests will be balanced between. -// -// This is an EXPERIMENTAL API and may be changed or removed in the future. func WithGRPCConnectionPool(size int) ClientOption { return withGRPCConnectionPool(size) } @@ -328,3 +327,14 @@ func (i impersonateServiceAccount) Apply(o *internal.DialSettings) { o.ImpersonationConfig.Delegates = make([]string, len(i.delegates)) copy(o.ImpersonationConfig.Delegates, i.delegates) } + +type withCreds google.Credentials + +func (w *withCreds) Apply(o *internal.DialSettings) { + o.Credentials = (*google.Credentials)(w) +} + +// WithCredentials returns a ClientOption that authenticates API calls. +func WithCredentials(creds *google.Credentials) ClientOption { + return (*withCreds)(creds) +} diff --git a/vendor/google.golang.org/api/transport/cert/default_cert.go b/vendor/google.golang.org/api/transport/cert/default_cert.go index 04aefec0a..21d025153 100644 --- a/vendor/google.golang.org/api/transport/cert/default_cert.go +++ b/vendor/google.golang.org/api/transport/cert/default_cert.go @@ -14,32 +14,19 @@ package cert import ( "crypto/tls" - "crypto/x509" - "encoding/json" "errors" - "fmt" - "io/ioutil" - "os" - "os/exec" - "os/user" - "path/filepath" "sync" - "time" -) - -const ( - metadataPath = ".secureConnect" - metadataFile = "context_aware_metadata.json" ) // defaultCertData holds all the variables pertaining to // the default certficate source created by DefaultSource. +// +// A singleton model is used to allow the source to be reused +// by the transport layer. type defaultCertData struct { - once sync.Once - source Source - err error - cachedCertMutex sync.Mutex - cachedCert *tls.Certificate + once sync.Once + source Source + err error } var ( @@ -49,93 +36,23 @@ var ( // Source is a function that can be passed into crypto/tls.Config.GetClientCertificate. type Source func(*tls.CertificateRequestInfo) (*tls.Certificate, error) -// DefaultSource returns a certificate source that execs the command specified -// in the file at ~/.secureConnect/context_aware_metadata.json +// errSourceUnavailable is a sentinel error to indicate certificate source is unavailable. +var errSourceUnavailable = errors.New("certificate source is unavailable") + +// DefaultSource returns a certificate source using the preferred EnterpriseCertificateProxySource. +// If EnterpriseCertificateProxySource is not available, fall back to the legacy SecureConnectSource. 
// -// If that file does not exist, a nil source is returned. +// If neither source is available (due to missing configurations), a nil Source and a nil Error are +// returned to indicate that a default certificate source is unavailable. func DefaultSource() (Source, error) { defaultCert.once.Do(func() { - defaultCert.source, defaultCert.err = newSecureConnectSource() + defaultCert.source, defaultCert.err = NewEnterpriseCertificateProxySource("") + if errors.Is(defaultCert.err, errSourceUnavailable) { + defaultCert.source, defaultCert.err = NewSecureConnectSource("") + if errors.Is(defaultCert.err, errSourceUnavailable) { + defaultCert.source, defaultCert.err = nil, nil + } + } }) return defaultCert.source, defaultCert.err } - -type secureConnectSource struct { - metadata secureConnectMetadata -} - -type secureConnectMetadata struct { - Cmd []string `json:"cert_provider_command"` -} - -// newSecureConnectSource creates a secureConnectSource by reading the well-known file. -func newSecureConnectSource() (Source, error) { - user, err := user.Current() - if err != nil { - // Ignore. - return nil, nil - } - filename := filepath.Join(user.HomeDir, metadataPath, metadataFile) - file, err := ioutil.ReadFile(filename) - if os.IsNotExist(err) { - // Ignore. - return nil, nil - } - if err != nil { - return nil, err - } - - var metadata secureConnectMetadata - if err := json.Unmarshal(file, &metadata); err != nil { - return nil, fmt.Errorf("cert: could not parse JSON in %q: %v", filename, err) - } - if err := validateMetadata(metadata); err != nil { - return nil, fmt.Errorf("cert: invalid config in %q: %v", filename, err) - } - return (&secureConnectSource{ - metadata: metadata, - }).getClientCertificate, nil -} - -func validateMetadata(metadata secureConnectMetadata) error { - if len(metadata.Cmd) == 0 { - return errors.New("empty cert_provider_command") - } - return nil -} - -func (s *secureConnectSource) getClientCertificate(info *tls.CertificateRequestInfo) (*tls.Certificate, error) { - defaultCert.cachedCertMutex.Lock() - defer defaultCert.cachedCertMutex.Unlock() - if defaultCert.cachedCert != nil && !isCertificateExpired(defaultCert.cachedCert) { - return defaultCert.cachedCert, nil - } - // Expand OS environment variables in the cert provider command such as "$HOME". - for i := 0; i < len(s.metadata.Cmd); i++ { - s.metadata.Cmd[i] = os.ExpandEnv(s.metadata.Cmd[i]) - } - command := s.metadata.Cmd - data, err := exec.Command(command[0], command[1:]...).Output() - if err != nil { - // TODO(cbro): read stderr for error message? Might contain sensitive info. - return nil, err - } - cert, err := tls.X509KeyPair(data, data) - if err != nil { - return nil, err - } - defaultCert.cachedCert = &cert - return &cert, nil -} - -// isCertificateExpired returns true if the given cert is expired or invalid. -func isCertificateExpired(cert *tls.Certificate) bool { - if len(cert.Certificate) == 0 { - return true - } - parsed, err := x509.ParseCertificate(cert.Certificate[0]) - if err != nil { - return true - } - return time.Now().After(parsed.NotAfter) -} diff --git a/vendor/google.golang.org/api/transport/cert/enterprise_cert.go b/vendor/google.golang.org/api/transport/cert/enterprise_cert.go new file mode 100644 index 000000000..eaa52e07c --- /dev/null +++ b/vendor/google.golang.org/api/transport/cert/enterprise_cert.go @@ -0,0 +1,56 @@ +// Copyright 2022 Google LLC. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
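// Illustrative sketch (assumed example, not part of the vendored file): wiring the
// certificate source selection above into a client TLS config. DefaultSource prefers the
// enterprise-certificate-proxy source and falls back to the Secure Connect helper; a nil
// Source with a nil error means neither is configured on this machine.
package example

import (
	"crypto/tls"

	"google.golang.org/api/transport/cert"
)

func clientTLSConfig() (*tls.Config, error) {
	src, err := cert.DefaultSource()
	if err != nil {
		return nil, err
	}
	cfg := &tls.Config{MinVersion: tls.VersionTLS12}
	if src != nil {
		// cert.Source matches the GetClientCertificate callback signature.
		cfg.GetClientCertificate = src
	}
	return cfg, nil
}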
+ +// Package cert contains certificate tools for Google API clients. +// This package is intended to be used with crypto/tls.Config.GetClientCertificate. +// +// The certificates can be used to satisfy Google's Endpoint Validation. +// See https://cloud.google.com/endpoint-verification/docs/overview +// +// This package is not intended for use by end developers. Use the +// google.golang.org/api/option package to configure API clients. +package cert + +import ( + "crypto/tls" + "errors" + "os" + + "github.com/googleapis/enterprise-certificate-proxy/client" +) + +type ecpSource struct { + key *client.Key +} + +// NewEnterpriseCertificateProxySource creates a certificate source +// using the Enterprise Certificate Proxy client, which delegates +// certifcate related operations to an OS-specific "signer binary" +// that communicates with the native keystore (ex. keychain on MacOS). +// +// The configFilePath points to a config file containing relevant parameters +// such as the certificate issuer and the location of the signer binary. +// If configFilePath is empty, the client will attempt to load the config from +// a well-known gcloud location. +func NewEnterpriseCertificateProxySource(configFilePath string) (Source, error) { + key, err := client.Cred(configFilePath) + if err != nil { + if errors.Is(err, os.ErrNotExist) { + // Config file missing means Enterprise Certificate Proxy is not supported. + return nil, errSourceUnavailable + } + return nil, err + } + + return (&ecpSource{ + key: key, + }).getClientCertificate, nil +} + +func (s *ecpSource) getClientCertificate(info *tls.CertificateRequestInfo) (*tls.Certificate, error) { + var cert tls.Certificate + cert.PrivateKey = s.key + cert.Certificate = s.key.CertificateChain() + return &cert, nil +} diff --git a/vendor/google.golang.org/api/transport/cert/secureconnect_cert.go b/vendor/google.golang.org/api/transport/cert/secureconnect_cert.go new file mode 100644 index 000000000..5913cab80 --- /dev/null +++ b/vendor/google.golang.org/api/transport/cert/secureconnect_cert.go @@ -0,0 +1,123 @@ +// Copyright 2022 Google LLC. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package cert contains certificate tools for Google API clients. +// This package is intended to be used with crypto/tls.Config.GetClientCertificate. +// +// The certificates can be used to satisfy Google's Endpoint Validation. +// See https://cloud.google.com/endpoint-verification/docs/overview +// +// This package is not intended for use by end developers. Use the +// google.golang.org/api/option package to configure API clients. +package cert + +import ( + "crypto/tls" + "crypto/x509" + "encoding/json" + "errors" + "fmt" + "io/ioutil" + "os" + "os/exec" + "os/user" + "path/filepath" + "sync" + "time" +) + +const ( + metadataPath = ".secureConnect" + metadataFile = "context_aware_metadata.json" +) + +type secureConnectSource struct { + metadata secureConnectMetadata + + // Cache the cert to avoid executing helper command repeatedly. + cachedCertMutex sync.Mutex + cachedCert *tls.Certificate +} + +type secureConnectMetadata struct { + Cmd []string `json:"cert_provider_command"` +} + +// NewSecureConnectSource creates a certificate source using +// the Secure Connect Helper and its associated metadata file. +// +// The configFilePath points to the location of the context aware metadata file. +// If configFilePath is empty, use the default context aware metadata location. 
+func NewSecureConnectSource(configFilePath string) (Source, error) { + if configFilePath == "" { + user, err := user.Current() + if err != nil { + // Error locating the default config means Secure Connect is not supported. + return nil, errSourceUnavailable + } + configFilePath = filepath.Join(user.HomeDir, metadataPath, metadataFile) + } + + file, err := ioutil.ReadFile(configFilePath) + if err != nil { + if errors.Is(err, os.ErrNotExist) { + // Config file missing means Secure Connect is not supported. + return nil, errSourceUnavailable + } + return nil, err + } + + var metadata secureConnectMetadata + if err := json.Unmarshal(file, &metadata); err != nil { + return nil, fmt.Errorf("cert: could not parse JSON in %q: %w", configFilePath, err) + } + if err := validateMetadata(metadata); err != nil { + return nil, fmt.Errorf("cert: invalid config in %q: %w", configFilePath, err) + } + return (&secureConnectSource{ + metadata: metadata, + }).getClientCertificate, nil +} + +func validateMetadata(metadata secureConnectMetadata) error { + if len(metadata.Cmd) == 0 { + return errors.New("empty cert_provider_command") + } + return nil +} + +func (s *secureConnectSource) getClientCertificate(info *tls.CertificateRequestInfo) (*tls.Certificate, error) { + s.cachedCertMutex.Lock() + defer s.cachedCertMutex.Unlock() + if s.cachedCert != nil && !isCertificateExpired(s.cachedCert) { + return s.cachedCert, nil + } + // Expand OS environment variables in the cert provider command such as "$HOME". + for i := 0; i < len(s.metadata.Cmd); i++ { + s.metadata.Cmd[i] = os.ExpandEnv(s.metadata.Cmd[i]) + } + command := s.metadata.Cmd + data, err := exec.Command(command[0], command[1:]...).Output() + if err != nil { + return nil, err + } + cert, err := tls.X509KeyPair(data, data) + if err != nil { + return nil, err + } + s.cachedCert = &cert + return &cert, nil +} + +// isCertificateExpired returns true if the given cert is expired or invalid. +func isCertificateExpired(cert *tls.Certificate) bool { + if len(cert.Certificate) == 0 { + return true + } + parsed, err := x509.ParseCertificate(cert.Certificate[0]) + if err != nil { + return true + } + return time.Now().After(parsed.NotAfter) +} diff --git a/vendor/google.golang.org/api/transport/http/default_transport_go113.go b/vendor/google.golang.org/api/transport/http/default_transport_go113.go deleted file mode 100644 index 69b2280cd..000000000 --- a/vendor/google.golang.org/api/transport/http/default_transport_go113.go +++ /dev/null @@ -1,21 +0,0 @@ -// Copyright 2020 Google LLC. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -//go:build go1.13 -// +build go1.13 - -package http - -import "net/http" - -// clonedTransport returns the given RoundTripper as a cloned *http.Transport. -// It returns nil if the RoundTripper can't be cloned or coerced to -// *http.Transport. -func clonedTransport(rt http.RoundTripper) *http.Transport { - t, ok := rt.(*http.Transport) - if !ok { - return nil - } - return t.Clone() -} diff --git a/vendor/google.golang.org/api/transport/http/default_transport_not_go113.go b/vendor/google.golang.org/api/transport/http/default_transport_not_go113.go deleted file mode 100644 index 90ee1bd61..000000000 --- a/vendor/google.golang.org/api/transport/http/default_transport_not_go113.go +++ /dev/null @@ -1,16 +0,0 @@ -// Copyright 2020 Google LLC. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
- -//go:build !go1.13 -// +build !go1.13 - -package http - -import "net/http" - -// clonedTransport returns the given RoundTripper as a cloned *http.Transport. -// For versions of Go <1.13, this is not supported, so return nil. -func clonedTransport(rt http.RoundTripper) *http.Transport { - return nil -} diff --git a/vendor/google.golang.org/api/transport/http/dial.go b/vendor/google.golang.org/api/transport/http/dial.go index 179534a0c..cab709f0c 100644 --- a/vendor/google.golang.org/api/transport/http/dial.go +++ b/vendor/google.golang.org/api/transport/http/dial.go @@ -210,3 +210,14 @@ func addOCTransport(trans http.RoundTripper, settings *internal.DialSettings) ht Propagation: &propagation.HTTPFormat{}, } } + +// clonedTransport returns the given RoundTripper as a cloned *http.Transport. +// It returns nil if the RoundTripper can't be cloned or coerced to +// *http.Transport. +func clonedTransport(rt http.RoundTripper) *http.Transport { + t, ok := rt.(*http.Transport) + if !ok { + return nil + } + return t.Clone() +} diff --git a/vendor/google.golang.org/grpc/CONTRIBUTING.md b/vendor/google.golang.org/grpc/CONTRIBUTING.md index cd03f8c76..52338d004 100644 --- a/vendor/google.golang.org/grpc/CONTRIBUTING.md +++ b/vendor/google.golang.org/grpc/CONTRIBUTING.md @@ -53,9 +53,8 @@ How to get your contributions merged smoothly and quickly. - **All tests need to be passing** before your change can be merged. We recommend you **run tests locally** before creating your PR to catch breakages early on. - - `make all` to test everything, OR - - `make vet` to catch vet errors - - `make test` to run the tests - - `make testrace` to run tests in race mode + - `VET_SKIP_PROTO=1 ./vet.sh` to catch vet errors + - `go test -cpu 1,4 -timeout 7m ./...` to run the tests + - `go test -race -cpu 1,4 -timeout 7m ./...` to run tests in race mode - Exceptions to the rules can be made if there's a compelling reason for doing so. diff --git a/vendor/google.golang.org/grpc/attributes/attributes.go b/vendor/google.golang.org/grpc/attributes/attributes.go index 3220d87be..ae13ddac1 100644 --- a/vendor/google.golang.org/grpc/attributes/attributes.go +++ b/vendor/google.golang.org/grpc/attributes/attributes.go @@ -25,55 +25,77 @@ // later release. package attributes -import "fmt" - // Attributes is an immutable struct for storing and retrieving generic // key/value pairs. Keys must be hashable, and users should define their own -// types for keys. +// types for keys. Values should not be modified after they are added to an +// Attributes or if they were received from one. If values implement 'Equal(o +// interface{}) bool', it will be called by (*Attributes).Equal to determine +// whether two values with the same key should be considered equal. type Attributes struct { m map[interface{}]interface{} } -// New returns a new Attributes containing all key/value pairs in kvs. If the -// same key appears multiple times, the last value overwrites all previous -// values for that key. Panics if len(kvs) is not even. -func New(kvs ...interface{}) *Attributes { - if len(kvs)%2 != 0 { - panic(fmt.Sprintf("attributes.New called with unexpected input: len(kvs) = %v", len(kvs))) - } - a := &Attributes{m: make(map[interface{}]interface{}, len(kvs)/2)} - for i := 0; i < len(kvs)/2; i++ { - a.m[kvs[i*2]] = kvs[i*2+1] - } - return a +// New returns a new Attributes containing the key/value pair. 
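// Illustrative sketch (assumed example, not part of the vendored file): usage of the
// reworked attributes API in this hunk (New, WithValue, Value, Equal). keyType is a
// hypothetical unexported key type, as the package comment recommends.
package example

import (
	"fmt"

	"google.golang.org/grpc/attributes"
)

type keyType string

func demo() {
	a := attributes.New(keyType("region"), "eu-west-1")
	// WithValue returns a new *Attributes; a itself is never mutated.
	b := a.WithValue(keyType("zone"), "eu-west-1a")

	fmt.Println(b.Value(keyType("region"))) // eu-west-1
	fmt.Println(a.Equal(b))                 // false: b carries an additional key/value pair
}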
+func New(key, value interface{}) *Attributes { + return &Attributes{m: map[interface{}]interface{}{key: value}} } -// WithValues returns a new Attributes containing all key/value pairs in a and -// kvs. Panics if len(kvs) is not even. If the same key appears multiple -// times, the last value overwrites all previous values for that key. To -// remove an existing key, use a nil value. -func (a *Attributes) WithValues(kvs ...interface{}) *Attributes { +// WithValue returns a new Attributes containing the previous keys and values +// and the new key/value pair. If the same key appears multiple times, the +// last value overwrites all previous values for that key. To remove an +// existing key, use a nil value. value should not be modified later. +func (a *Attributes) WithValue(key, value interface{}) *Attributes { if a == nil { - return New(kvs...) + return New(key, value) } - if len(kvs)%2 != 0 { - panic(fmt.Sprintf("attributes.New called with unexpected input: len(kvs) = %v", len(kvs))) - } - n := &Attributes{m: make(map[interface{}]interface{}, len(a.m)+len(kvs)/2)} + n := &Attributes{m: make(map[interface{}]interface{}, len(a.m)+1)} for k, v := range a.m { n.m[k] = v } - for i := 0; i < len(kvs)/2; i++ { - n.m[kvs[i*2]] = kvs[i*2+1] - } + n.m[key] = value return n } // Value returns the value associated with these attributes for key, or nil if -// no value is associated with key. +// no value is associated with key. The returned value should not be modified. func (a *Attributes) Value(key interface{}) interface{} { if a == nil { return nil } return a.m[key] } + +// Equal returns whether a and o are equivalent. If 'Equal(o interface{}) +// bool' is implemented for a value in the attributes, it is called to +// determine if the value matches the one stored in the other attributes. If +// Equal is not implemented, standard equality is used to determine if the two +// values are equal. Note that some types (e.g. maps) aren't comparable by +// default, so they must be wrapped in a struct, or in an alias type, with Equal +// defined. +func (a *Attributes) Equal(o *Attributes) bool { + if a == nil && o == nil { + return true + } + if a == nil || o == nil { + return false + } + if len(a.m) != len(o.m) { + return false + } + for k, v := range a.m { + ov, ok := o.m[k] + if !ok { + // o missing element of a + return false + } + if eq, ok := v.(interface{ Equal(o interface{}) bool }); ok { + if !eq.Equal(ov) { + return false + } + } else if v != ov { + // Fallback to a standard equality check if Value is unimplemented. + return false + } + } + return true +} diff --git a/vendor/google.golang.org/grpc/balancer/balancer.go b/vendor/google.golang.org/grpc/balancer/balancer.go index 178de0898..f7a7697ca 100644 --- a/vendor/google.golang.org/grpc/balancer/balancer.go +++ b/vendor/google.golang.org/grpc/balancer/balancer.go @@ -27,6 +27,7 @@ import ( "net" "strings" + "google.golang.org/grpc/channelz" "google.golang.org/grpc/connectivity" "google.golang.org/grpc/credentials" "google.golang.org/grpc/internal" @@ -174,25 +175,32 @@ type ClientConn interface { // BuildOptions contains additional information for Build. type BuildOptions struct { - // DialCreds is the transport credential the Balancer implementation can - // use to dial to a remote load balancer server. The Balancer implementations - // can ignore this if it does not need to talk to another party securely. + // DialCreds is the transport credentials to use when communicating with a + // remote load balancer server. 
Balancer implementations which do not + // communicate with a remote load balancer server can ignore this field. DialCreds credentials.TransportCredentials - // CredsBundle is the credentials bundle that the Balancer can use. + // CredsBundle is the credentials bundle to use when communicating with a + // remote load balancer server. Balancer implementations which do not + // communicate with a remote load balancer server can ignore this field. CredsBundle credentials.Bundle - // Dialer is the custom dialer the Balancer implementation can use to dial - // to a remote load balancer server. The Balancer implementations - // can ignore this if it doesn't need to talk to remote balancer. + // Dialer is the custom dialer to use when communicating with a remote load + // balancer server. Balancer implementations which do not communicate with a + // remote load balancer server can ignore this field. Dialer func(context.Context, string) (net.Conn, error) - // ChannelzParentID is the entity parent's channelz unique identification number. - ChannelzParentID int64 + // Authority is the server name to use as part of the authentication + // handshake when communicating with a remote load balancer server. Balancer + // implementations which do not communicate with a remote load balancer + // server can ignore this field. + Authority string + // ChannelzParentID is the parent ClientConn's channelz ID. + ChannelzParentID *channelz.Identifier // CustomUserAgent is the custom user agent set on the parent ClientConn. // The balancer should set the same custom user agent if it creates a // ClientConn. CustomUserAgent string - // Target contains the parsed address info of the dial target. It is the same resolver.Target as - // passed to the resolver. - // See the documentation for the resolver.Target type for details about what it contains. + // Target contains the parsed address info of the dial target. It is the + // same resolver.Target as passed to the resolver. See the documentation for + // the resolver.Target type for details about what it contains. 
Target resolver.Target } diff --git a/vendor/google.golang.org/grpc/balancer/base/balancer.go b/vendor/google.golang.org/grpc/balancer/base/balancer.go index 8dd504299..a67074a3a 100644 --- a/vendor/google.golang.org/grpc/balancer/base/balancer.go +++ b/vendor/google.golang.org/grpc/balancer/base/balancer.go @@ -22,7 +22,6 @@ import ( "errors" "fmt" - "google.golang.org/grpc/attributes" "google.golang.org/grpc/balancer" "google.golang.org/grpc/connectivity" "google.golang.org/grpc/grpclog" @@ -42,7 +41,7 @@ func (bb *baseBuilder) Build(cc balancer.ClientConn, opt balancer.BuildOptions) cc: cc, pickerBuilder: bb.pickerBuilder, - subConns: make(map[resolver.Address]subConnInfo), + subConns: resolver.NewAddressMap(), scStates: make(map[balancer.SubConn]connectivity.State), csEvltr: &balancer.ConnectivityStateEvaluator{}, config: bb.config, @@ -58,11 +57,6 @@ func (bb *baseBuilder) Name() string { return bb.name } -type subConnInfo struct { - subConn balancer.SubConn - attrs *attributes.Attributes -} - type baseBalancer struct { cc balancer.ClientConn pickerBuilder PickerBuilder @@ -70,7 +64,7 @@ type baseBalancer struct { csEvltr *balancer.ConnectivityStateEvaluator state connectivity.State - subConns map[resolver.Address]subConnInfo // `attributes` is stripped from the keys of this map (the addresses) + subConns *resolver.AddressMap scStates map[balancer.SubConn]connectivity.State picker balancer.Picker config Config @@ -81,7 +75,7 @@ type baseBalancer struct { func (b *baseBalancer) ResolverError(err error) { b.resolverErr = err - if len(b.subConns) == 0 { + if b.subConns.Len() == 0 { b.state = connectivity.TransientFailure } @@ -105,53 +99,29 @@ func (b *baseBalancer) UpdateClientConnState(s balancer.ClientConnState) error { // Successful resolution; clear resolver error and ensure we return nil. b.resolverErr = nil // addrsSet is the set converted from addrs, it's used for quick lookup of an address. - addrsSet := make(map[resolver.Address]struct{}) + addrsSet := resolver.NewAddressMap() for _, a := range s.ResolverState.Addresses { - // Strip attributes from addresses before using them as map keys. So - // that when two addresses only differ in attributes pointers (but with - // the same attribute content), they are considered the same address. - // - // Note that this doesn't handle the case where the attribute content is - // different. So if users want to set different attributes to create - // duplicate connections to the same backend, it doesn't work. This is - // fine for now, because duplicate is done by setting Metadata today. - // - // TODO: read attributes to handle duplicate connections. - aNoAttrs := a - aNoAttrs.Attributes = nil - addrsSet[aNoAttrs] = struct{}{} - if scInfo, ok := b.subConns[aNoAttrs]; !ok { + addrsSet.Set(a, nil) + if _, ok := b.subConns.Get(a); !ok { // a is a new address (not existing in b.subConns). - // - // When creating SubConn, the original address with attributes is - // passed through. So that connection configurations in attributes - // (like creds) will be used. 
sc, err := b.cc.NewSubConn([]resolver.Address{a}, balancer.NewSubConnOptions{HealthCheckEnabled: b.config.HealthCheck}) if err != nil { logger.Warningf("base.baseBalancer: failed to create new SubConn: %v", err) continue } - b.subConns[aNoAttrs] = subConnInfo{subConn: sc, attrs: a.Attributes} + b.subConns.Set(a, sc) b.scStates[sc] = connectivity.Idle b.csEvltr.RecordTransition(connectivity.Shutdown, connectivity.Idle) sc.Connect() - } else { - // Always update the subconn's address in case the attributes - // changed. - // - // The SubConn does a reflect.DeepEqual of the new and old - // addresses. So this is a noop if the current address is the same - // as the old one (including attributes). - scInfo.attrs = a.Attributes - b.subConns[aNoAttrs] = scInfo - b.cc.UpdateAddresses(scInfo.subConn, []resolver.Address{a}) } } - for a, scInfo := range b.subConns { + for _, a := range b.subConns.Keys() { + sci, _ := b.subConns.Get(a) + sc := sci.(balancer.SubConn) // a was removed by resolver. - if _, ok := addrsSet[a]; !ok { - b.cc.RemoveSubConn(scInfo.subConn) - delete(b.subConns, a) + if _, ok := addrsSet.Get(a); !ok { + b.cc.RemoveSubConn(sc) + b.subConns.Delete(a) // Keep the state of this sc in b.scStates until sc's state becomes Shutdown. // The entry will be deleted in UpdateSubConnState. } @@ -193,10 +163,11 @@ func (b *baseBalancer) regeneratePicker() { readySCs := make(map[balancer.SubConn]SubConnInfo) // Filter out all ready SCs from full subConn map. - for addr, scInfo := range b.subConns { - if st, ok := b.scStates[scInfo.subConn]; ok && st == connectivity.Ready { - addr.Attributes = scInfo.attrs - readySCs[scInfo.subConn] = SubConnInfo{Address: addr} + for _, addr := range b.subConns.Keys() { + sci, _ := b.subConns.Get(addr) + sc := sci.(balancer.SubConn) + if st, ok := b.scStates[sc]; ok && st == connectivity.Ready { + readySCs[sc] = SubConnInfo{Address: addr} } } b.picker = b.pickerBuilder.Build(PickerBuildInfo{ReadySCs: readySCs}) diff --git a/vendor/google.golang.org/grpc/balancer/grpclb/state/state.go b/vendor/google.golang.org/grpc/balancer/grpclb/state/state.go index a24264a34..4ecfa1c21 100644 --- a/vendor/google.golang.org/grpc/balancer/grpclb/state/state.go +++ b/vendor/google.golang.org/grpc/balancer/grpclb/state/state.go @@ -39,7 +39,7 @@ type State struct { // Set returns a copy of the provided state with attributes containing s. s's // data should not be mutated after calling Set. func Set(state resolver.State, s *State) resolver.State { - state.Attributes = state.Attributes.WithValues(key, s) + state.Attributes = state.Attributes.WithValue(key, s) return state } diff --git a/vendor/google.golang.org/grpc/balancer_conn_wrappers.go b/vendor/google.golang.org/grpc/balancer_conn_wrappers.go index f4ea61746..b1c23eaae 100644 --- a/vendor/google.golang.org/grpc/balancer_conn_wrappers.go +++ b/vendor/google.golang.org/grpc/balancer_conn_wrappers.go @@ -20,130 +20,178 @@ package grpc import ( "fmt" + "strings" "sync" "google.golang.org/grpc/balancer" "google.golang.org/grpc/connectivity" + "google.golang.org/grpc/internal/balancer/gracefulswitch" "google.golang.org/grpc/internal/buffer" "google.golang.org/grpc/internal/channelz" "google.golang.org/grpc/internal/grpcsync" "google.golang.org/grpc/resolver" ) -// scStateUpdate contains the subConn and the new state it changed to. -type scStateUpdate struct { - sc balancer.SubConn - state connectivity.State - err error -} +// ccBalancerWrapper sits between the ClientConn and the Balancer. 
+// +// ccBalancerWrapper implements methods corresponding to the ones on the +// balancer.Balancer interface. The ClientConn is free to call these methods +// concurrently and the ccBalancerWrapper ensures that calls from the ClientConn +// to the Balancer happen synchronously and in order. +// +// ccBalancerWrapper also implements the balancer.ClientConn interface and is +// passed to the Balancer implementations. It invokes unexported methods on the +// ClientConn to handle these calls from the Balancer. +// +// It uses the gracefulswitch.Balancer internally to ensure that balancer +// switches happen in a graceful manner. +type ccBalancerWrapper struct { + cc *ClientConn -// exitIdle contains no data and is just a signal sent on the updateCh in -// ccBalancerWrapper to instruct the balancer to exit idle. -type exitIdle struct{} + // Since these fields are accessed only from handleXxx() methods which are + // synchronized by the watcher goroutine, we do not need a mutex to protect + // these fields. + balancer *gracefulswitch.Balancer + curBalancerName string -// ccBalancerWrapper is a wrapper on top of cc for balancers. -// It implements balancer.ClientConn interface. -type ccBalancerWrapper struct { - cc *ClientConn - balancerMu sync.Mutex // synchronizes calls to the balancer - balancer balancer.Balancer - hasExitIdle bool - updateCh *buffer.Unbounded - closed *grpcsync.Event - done *grpcsync.Event - - mu sync.Mutex - subConns map[*acBalancerWrapper]struct{} + updateCh *buffer.Unbounded // Updates written on this channel are processed by watcher(). + resultCh *buffer.Unbounded // Results of calls to UpdateClientConnState() are pushed here. + closed *grpcsync.Event // Indicates if close has been called. + done *grpcsync.Event // Indicates if close has completed its work. } -func newCCBalancerWrapper(cc *ClientConn, b balancer.Builder, bopts balancer.BuildOptions) *ccBalancerWrapper { +// newCCBalancerWrapper creates a new balancer wrapper. The underlying balancer +// is not created until the switchTo() method is invoked. +func newCCBalancerWrapper(cc *ClientConn, bopts balancer.BuildOptions) *ccBalancerWrapper { ccb := &ccBalancerWrapper{ cc: cc, updateCh: buffer.NewUnbounded(), + resultCh: buffer.NewUnbounded(), closed: grpcsync.NewEvent(), done: grpcsync.NewEvent(), - subConns: make(map[*acBalancerWrapper]struct{}), } go ccb.watcher() - ccb.balancer = b.Build(ccb, bopts) - _, ccb.hasExitIdle = ccb.balancer.(balancer.ExitIdler) + ccb.balancer = gracefulswitch.NewBalancer(ccb, bopts) return ccb } -// watcher balancer functions sequentially, so the balancer can be implemented -// lock-free. +// The following xxxUpdate structs wrap the arguments received as part of the +// corresponding update. The watcher goroutine uses the 'type' of the update to +// invoke the appropriate handler routine to handle the update. + +type ccStateUpdate struct { + ccs *balancer.ClientConnState +} + +type scStateUpdate struct { + sc balancer.SubConn + state connectivity.State + err error +} + +type exitIdleUpdate struct{} + +type resolverErrorUpdate struct { + err error +} + +type switchToUpdate struct { + name string +} + +type subConnUpdate struct { + acbw *acBalancerWrapper +} + +// watcher is a long-running goroutine which reads updates from a channel and +// invokes corresponding methods on the underlying balancer. It ensures that +// these methods are invoked in a synchronous fashion. It also ensures that +// these methods are invoked in the order in which the updates were received. 
func (ccb *ccBalancerWrapper) watcher() { for { select { - case t := <-ccb.updateCh.Get(): + case u := <-ccb.updateCh.Get(): ccb.updateCh.Load() if ccb.closed.HasFired() { break } - switch u := t.(type) { + switch update := u.(type) { + case *ccStateUpdate: + ccb.handleClientConnStateChange(update.ccs) case *scStateUpdate: - ccb.balancerMu.Lock() - ccb.balancer.UpdateSubConnState(u.sc, balancer.SubConnState{ConnectivityState: u.state, ConnectionError: u.err}) - ccb.balancerMu.Unlock() - case *acBalancerWrapper: - ccb.mu.Lock() - if ccb.subConns != nil { - delete(ccb.subConns, u) - ccb.cc.removeAddrConn(u.getAddrConn(), errConnDrain) - } - ccb.mu.Unlock() - case exitIdle: - if ccb.cc.GetState() == connectivity.Idle { - if ei, ok := ccb.balancer.(balancer.ExitIdler); ok { - // We already checked that the balancer implements - // ExitIdle before pushing the event to updateCh, but - // check conditionally again as defensive programming. - ccb.balancerMu.Lock() - ei.ExitIdle() - ccb.balancerMu.Unlock() - } - } + ccb.handleSubConnStateChange(update) + case *exitIdleUpdate: + ccb.handleExitIdle() + case *resolverErrorUpdate: + ccb.handleResolverError(update.err) + case *switchToUpdate: + ccb.handleSwitchTo(update.name) + case *subConnUpdate: + ccb.handleRemoveSubConn(update.acbw) default: - logger.Errorf("ccBalancerWrapper.watcher: unknown update %+v, type %T", t, t) + logger.Errorf("ccBalancerWrapper.watcher: unknown update %+v, type %T", update, update) } case <-ccb.closed.Done(): } if ccb.closed.HasFired() { - ccb.balancerMu.Lock() - ccb.balancer.Close() - ccb.balancerMu.Unlock() - ccb.mu.Lock() - scs := ccb.subConns - ccb.subConns = nil - ccb.mu.Unlock() - ccb.UpdateState(balancer.State{ConnectivityState: connectivity.Connecting, Picker: nil}) - ccb.done.Fire() - // Fire done before removing the addr conns. We can safely unblock - // ccb.close and allow the removeAddrConns to happen - // asynchronously. - for acbw := range scs { - ccb.cc.removeAddrConn(acbw.getAddrConn(), errConnDrain) - } + ccb.handleClose() return } } } -func (ccb *ccBalancerWrapper) close() { - ccb.closed.Fire() - <-ccb.done.Done() +// updateClientConnState is invoked by grpc to push a ClientConnState update to +// the underlying balancer. +// +// Unlike other methods invoked by grpc to push updates to the underlying +// balancer, this method cannot simply push the update onto the update channel +// and return. It needs to return the error returned by the underlying balancer +// back to grpc which propagates that to the resolver. +func (ccb *ccBalancerWrapper) updateClientConnState(ccs *balancer.ClientConnState) error { + ccb.updateCh.Put(&ccStateUpdate{ccs: ccs}) + + var res interface{} + select { + case res = <-ccb.resultCh.Get(): + ccb.resultCh.Load() + case <-ccb.closed.Done(): + // Return early if the balancer wrapper is closed while we are waiting for + // the underlying balancer to process a ClientConnState update. + return nil + } + // If the returned error is nil, attempting to type assert to error leads to + // panic. So, this needs to handled separately. + if res == nil { + return nil + } + return res.(error) } -func (ccb *ccBalancerWrapper) exitIdle() bool { - if !ccb.hasExitIdle { - return false +// handleClientConnStateChange handles a ClientConnState update from the update +// channel and invokes the appropriate method on the underlying balancer. 
+// +// If the addresses specified in the update contain addresses of type "grpclb" +// and the selected LB policy is not "grpclb", these addresses will be filtered +// out and ccs will be modified with the updated address list. +func (ccb *ccBalancerWrapper) handleClientConnStateChange(ccs *balancer.ClientConnState) { + if ccb.curBalancerName != grpclbName { + // Filter any grpclb addresses since we don't have the grpclb balancer. + var addrs []resolver.Address + for _, addr := range ccs.ResolverState.Addresses { + if addr.Type == resolver.GRPCLB { + continue + } + addrs = append(addrs, addr) + } + ccs.ResolverState.Addresses = addrs } - ccb.updateCh.Put(exitIdle{}) - return true + ccb.resultCh.Put(ccb.balancer.UpdateClientConnState(*ccs)) } -func (ccb *ccBalancerWrapper) handleSubConnStateChange(sc balancer.SubConn, s connectivity.State, err error) { +// updateSubConnState is invoked by grpc to push a subConn state update to the +// underlying balancer. +func (ccb *ccBalancerWrapper) updateSubConnState(sc balancer.SubConn, s connectivity.State, err error) { // When updating addresses for a SubConn, if the address in use is not in // the new addresses, the old ac will be tearDown() and a new ac will be // created. tearDown() generates a state change with Shutdown state, we @@ -161,44 +209,125 @@ func (ccb *ccBalancerWrapper) handleSubConnStateChange(sc balancer.SubConn, s co }) } -func (ccb *ccBalancerWrapper) updateClientConnState(ccs *balancer.ClientConnState) error { - ccb.balancerMu.Lock() - defer ccb.balancerMu.Unlock() - return ccb.balancer.UpdateClientConnState(*ccs) +// handleSubConnStateChange handles a SubConnState update from the update +// channel and invokes the appropriate method on the underlying balancer. +func (ccb *ccBalancerWrapper) handleSubConnStateChange(update *scStateUpdate) { + ccb.balancer.UpdateSubConnState(update.sc, balancer.SubConnState{ConnectivityState: update.state, ConnectionError: update.err}) +} + +func (ccb *ccBalancerWrapper) exitIdle() { + ccb.updateCh.Put(&exitIdleUpdate{}) +} + +func (ccb *ccBalancerWrapper) handleExitIdle() { + if ccb.cc.GetState() != connectivity.Idle { + return + } + ccb.balancer.ExitIdle() } func (ccb *ccBalancerWrapper) resolverError(err error) { - ccb.balancerMu.Lock() - defer ccb.balancerMu.Unlock() + ccb.updateCh.Put(&resolverErrorUpdate{err: err}) +} + +func (ccb *ccBalancerWrapper) handleResolverError(err error) { ccb.balancer.ResolverError(err) } +// switchTo is invoked by grpc to instruct the balancer wrapper to switch to the +// LB policy identified by name. +// +// ClientConn calls newCCBalancerWrapper() at creation time. Upon receipt of the +// first good update from the name resolver, it determines the LB policy to use +// and invokes the switchTo() method. Upon receipt of every subsequent update +// from the name resolver, it invokes this method. +// +// the ccBalancerWrapper keeps track of the current LB policy name, and skips +// the graceful balancer switching process if the name does not change. +func (ccb *ccBalancerWrapper) switchTo(name string) { + ccb.updateCh.Put(&switchToUpdate{name: name}) +} + +// handleSwitchTo handles a balancer switch update from the update channel. It +// calls the SwitchTo() method on the gracefulswitch.Balancer with a +// balancer.Builder corresponding to name. If no balancer.Builder is registered +// for the given name, it uses the default LB policy which is "pick_first". 
+func (ccb *ccBalancerWrapper) handleSwitchTo(name string) { + // TODO: Other languages use case-insensitive balancer registries. We should + // switch as well. See: https://github.com/grpc/grpc-go/issues/5288. + if strings.EqualFold(ccb.curBalancerName, name) { + return + } + + // TODO: Ensure that name is a registered LB policy when we get here. + // We currently only validate the `loadBalancingConfig` field. We need to do + // the same for the `loadBalancingPolicy` field and reject the service config + // if the specified policy is not registered. + builder := balancer.Get(name) + if builder == nil { + channelz.Warningf(logger, ccb.cc.channelzID, "Channel switches to new LB policy %q, since the specified LB policy %q was not registered", PickFirstBalancerName, name) + builder = newPickfirstBuilder() + } else { + channelz.Infof(logger, ccb.cc.channelzID, "Channel switches to new LB policy %q", name) + } + + if err := ccb.balancer.SwitchTo(builder); err != nil { + channelz.Errorf(logger, ccb.cc.channelzID, "Channel failed to build new LB policy %q: %v", name, err) + return + } + ccb.curBalancerName = builder.Name() +} + +// handleRemoveSucConn handles a request from the underlying balancer to remove +// a subConn. +// +// See comments in RemoveSubConn() for more details. +func (ccb *ccBalancerWrapper) handleRemoveSubConn(acbw *acBalancerWrapper) { + ccb.cc.removeAddrConn(acbw.getAddrConn(), errConnDrain) +} + +func (ccb *ccBalancerWrapper) close() { + ccb.closed.Fire() + <-ccb.done.Done() +} + +func (ccb *ccBalancerWrapper) handleClose() { + ccb.balancer.Close() + ccb.done.Fire() +} + func (ccb *ccBalancerWrapper) NewSubConn(addrs []resolver.Address, opts balancer.NewSubConnOptions) (balancer.SubConn, error) { if len(addrs) <= 0 { return nil, fmt.Errorf("grpc: cannot create SubConn with empty address list") } - ccb.mu.Lock() - defer ccb.mu.Unlock() - if ccb.subConns == nil { - return nil, fmt.Errorf("grpc: ClientConn balancer wrapper was closed") - } ac, err := ccb.cc.newAddrConn(addrs, opts) if err != nil { + channelz.Warningf(logger, ccb.cc.channelzID, "acBalancerWrapper: NewSubConn: failed to newAddrConn: %v", err) return nil, err } acbw := &acBalancerWrapper{ac: ac} acbw.ac.mu.Lock() ac.acbw = acbw acbw.ac.mu.Unlock() - ccb.subConns[acbw] = struct{}{} return acbw, nil } func (ccb *ccBalancerWrapper) RemoveSubConn(sc balancer.SubConn) { - // The RemoveSubConn() is handled in the run() goroutine, to avoid deadlock - // during switchBalancer() if the old balancer calls RemoveSubConn() in its - // Close(). - ccb.updateCh.Put(sc) + // Before we switched the ccBalancerWrapper to use gracefulswitch.Balancer, it + // was required to handle the RemoveSubConn() method asynchronously by pushing + // the update onto the update channel. This was done to avoid a deadlock as + // switchBalancer() was holding cc.mu when calling Close() on the old + // balancer, which would in turn call RemoveSubConn(). + // + // With the use of gracefulswitch.Balancer in ccBalancerWrapper, handling this + // asynchronously is probably not required anymore since the switchTo() method + // handles the balancer switch by pushing the update onto the channel. + // TODO(easwars): Handle this inline. 
+ acbw, ok := sc.(*acBalancerWrapper) + if !ok { + return + } + ccb.updateCh.Put(&subConnUpdate{acbw: acbw}) } func (ccb *ccBalancerWrapper) UpdateAddresses(sc balancer.SubConn, addrs []resolver.Address) { @@ -210,11 +339,6 @@ func (ccb *ccBalancerWrapper) UpdateAddresses(sc balancer.SubConn, addrs []resol } func (ccb *ccBalancerWrapper) UpdateState(s balancer.State) { - ccb.mu.Lock() - defer ccb.mu.Unlock() - if ccb.subConns == nil { - return - } // Update picker before updating state. Even though the ordering here does // not matter, it can lead to multiple calls of Pick in the common start-up // case where we wait for ready and then perform an RPC. If the picker is diff --git a/vendor/google.golang.org/grpc/channelz/channelz.go b/vendor/google.golang.org/grpc/channelz/channelz.go new file mode 100644 index 000000000..a220c47c5 --- /dev/null +++ b/vendor/google.golang.org/grpc/channelz/channelz.go @@ -0,0 +1,36 @@ +/* + * + * Copyright 2020 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package channelz exports internals of the channelz implementation as required +// by other gRPC packages. +// +// The implementation of the channelz spec as defined in +// https://github.com/grpc/proposal/blob/master/A14-channelz.md, is provided by +// the `internal/channelz` package. +// +// Experimental +// +// Notice: All APIs in this package are experimental and may be removed in a +// later release. +package channelz + +import "google.golang.org/grpc/internal/channelz" + +// Identifier is an opaque identifier which uniquely identifies an entity in the +// channelz database. +type Identifier = channelz.Identifier diff --git a/vendor/google.golang.org/grpc/clientconn.go b/vendor/google.golang.org/grpc/clientconn.go index 34cc4c948..de6d41c23 100644 --- a/vendor/google.golang.org/grpc/clientconn.go +++ b/vendor/google.golang.org/grpc/clientconn.go @@ -23,6 +23,7 @@ import ( "errors" "fmt" "math" + "net/url" "reflect" "strings" "sync" @@ -37,7 +38,6 @@ import ( "google.golang.org/grpc/internal/backoff" "google.golang.org/grpc/internal/channelz" "google.golang.org/grpc/internal/grpcsync" - "google.golang.org/grpc/internal/grpcutil" iresolver "google.golang.org/grpc/internal/resolver" "google.golang.org/grpc/internal/transport" "google.golang.org/grpc/keepalive" @@ -79,17 +79,17 @@ var ( // errNoTransportSecurity indicates that there is no transport security // being set for ClientConn. Users should either set one or explicitly // call WithInsecure DialOption to disable security. - errNoTransportSecurity = errors.New("grpc: no transport security set (use grpc.WithInsecure() explicitly or set credentials)") + errNoTransportSecurity = errors.New("grpc: no transport security set (use grpc.WithTransportCredentials(insecure.NewCredentials()) explicitly or set credentials)") // errTransportCredsAndBundle indicates that creds bundle is used together // with other individual Transport Credentials. 
errTransportCredsAndBundle = errors.New("grpc: credentials.Bundle may not be used with individual TransportCredentials") - // errTransportCredentialsMissing indicates that users want to transmit security - // information (e.g., OAuth2 token) which requires secure connection on an insecure - // connection. + // errNoTransportCredsInBundle indicated that the configured creds bundle + // returned a transport credentials which was nil. + errNoTransportCredsInBundle = errors.New("grpc: credentials.Bundle must return non-nil transport credentials") + // errTransportCredentialsMissing indicates that users want to transmit + // security information (e.g., OAuth2 token) which requires secure + // connection on an insecure connection. errTransportCredentialsMissing = errors.New("grpc: the credentials require transport level security (use grpc.WithTransportCredentials() to set)") - // errCredentialsConflict indicates that grpc.WithTransportCredentials() - // and grpc.WithInsecure() are both called for a connection. - errCredentialsConflict = errors.New("grpc: transport credentials are set for an insecure connection (grpc.WithTransportCredentials() and grpc.WithInsecure() are both called)") ) const ( @@ -159,35 +159,35 @@ func DialContext(ctx context.Context, target string, opts ...DialOption) (conn * } }() - if channelz.IsOn() { - if cc.dopts.channelzParentID != 0 { - cc.channelzID = channelz.RegisterChannel(&channelzChannel{cc}, cc.dopts.channelzParentID, target) - channelz.AddTraceEvent(logger, cc.channelzID, 0, &channelz.TraceEventDesc{ - Desc: "Channel Created", - Severity: channelz.CtInfo, - Parent: &channelz.TraceEventDesc{ - Desc: fmt.Sprintf("Nested Channel(id:%d) created", cc.channelzID), - Severity: channelz.CtInfo, - }, - }) - } else { - cc.channelzID = channelz.RegisterChannel(&channelzChannel{cc}, 0, target) - channelz.Info(logger, cc.channelzID, "Channel Created") + pid := cc.dopts.channelzParentID + cc.channelzID = channelz.RegisterChannel(&channelzChannel{cc}, pid, target) + ted := &channelz.TraceEventDesc{ + Desc: "Channel created", + Severity: channelz.CtInfo, + } + if cc.dopts.channelzParentID != nil { + ted.Parent = &channelz.TraceEventDesc{ + Desc: fmt.Sprintf("Nested Channel(id:%d) created", cc.channelzID.Int()), + Severity: channelz.CtInfo, } - cc.csMgr.channelzID = cc.channelzID } + channelz.AddTraceEvent(logger, cc.channelzID, 1, ted) + cc.csMgr.channelzID = cc.channelzID - if !cc.dopts.insecure { - if cc.dopts.copts.TransportCredentials == nil && cc.dopts.copts.CredsBundle == nil { - return nil, errNoTransportSecurity - } - if cc.dopts.copts.TransportCredentials != nil && cc.dopts.copts.CredsBundle != nil { - return nil, errTransportCredsAndBundle - } - } else { - if cc.dopts.copts.TransportCredentials != nil || cc.dopts.copts.CredsBundle != nil { - return nil, errCredentialsConflict - } + if cc.dopts.copts.TransportCredentials == nil && cc.dopts.copts.CredsBundle == nil { + return nil, errNoTransportSecurity + } + if cc.dopts.copts.TransportCredentials != nil && cc.dopts.copts.CredsBundle != nil { + return nil, errTransportCredsAndBundle + } + if cc.dopts.copts.CredsBundle != nil && cc.dopts.copts.CredsBundle.TransportCredentials() == nil { + return nil, errNoTransportCredsInBundle + } + transportCreds := cc.dopts.copts.TransportCredentials + if transportCreds == nil { + transportCreds = cc.dopts.copts.CredsBundle.TransportCredentials() + } + if transportCreds.Info().SecurityProtocol == "insecure" { for _, cd := range cc.dopts.copts.PerRPCCredentials { if 
cd.RequireTransportSecurity() { return nil, errTransportCredentialsMissing @@ -248,38 +248,15 @@ func DialContext(ctx context.Context, target string, opts ...DialOption) (conn * } // Determine the resolver to use. - cc.parsedTarget = grpcutil.ParseTarget(cc.target, cc.dopts.copts.Dialer != nil) - channelz.Infof(logger, cc.channelzID, "parsed scheme: %q", cc.parsedTarget.Scheme) - resolverBuilder := cc.getResolver(cc.parsedTarget.Scheme) - if resolverBuilder == nil { - // If resolver builder is still nil, the parsed target's scheme is - // not registered. Fallback to default resolver and set Endpoint to - // the original target. - channelz.Infof(logger, cc.channelzID, "scheme %q not registered, fallback to default scheme", cc.parsedTarget.Scheme) - cc.parsedTarget = resolver.Target{ - Scheme: resolver.GetDefaultScheme(), - Endpoint: target, - } - resolverBuilder = cc.getResolver(cc.parsedTarget.Scheme) - if resolverBuilder == nil { - return nil, fmt.Errorf("could not get resolver for default scheme: %q", cc.parsedTarget.Scheme) - } + resolverBuilder, err := cc.parseTargetAndFindResolver() + if err != nil { + return nil, err } - - creds := cc.dopts.copts.TransportCredentials - if creds != nil && creds.Info().ServerName != "" { - cc.authority = creds.Info().ServerName - } else if cc.dopts.insecure && cc.dopts.authority != "" { - cc.authority = cc.dopts.authority - } else if strings.HasPrefix(cc.target, "unix:") || strings.HasPrefix(cc.target, "unix-abstract:") { - cc.authority = "localhost" - } else if strings.HasPrefix(cc.parsedTarget.Endpoint, ":") { - cc.authority = "localhost" + cc.parsedTarget.Endpoint - } else { - // Use endpoint from "scheme://authority/endpoint" as the default - // authority for ClientConn. - cc.authority = cc.parsedTarget.Endpoint + cc.authority, err = determineAuthority(cc.parsedTarget.Endpoint, cc.target, cc.dopts) + if err != nil { + return nil, err } + channelz.Infof(logger, cc.channelzID, "Channel authority set to %q", cc.authority) if cc.dopts.scChan != nil && !scSet { // Blocking wait for the initial service config. @@ -301,14 +278,15 @@ func DialContext(ctx context.Context, target string, opts ...DialOption) (conn * if creds := cc.dopts.copts.TransportCredentials; creds != nil { credsClone = creds.Clone() } - cc.balancerBuildOpts = balancer.BuildOptions{ + cc.balancerWrapper = newCCBalancerWrapper(cc, balancer.BuildOptions{ DialCreds: credsClone, CredsBundle: cc.dopts.copts.CredsBundle, Dialer: cc.dopts.copts.Dialer, + Authority: cc.authority, CustomUserAgent: cc.dopts.copts.UserAgent, ChannelzParentID: cc.channelzID, Target: cc.parsedTarget, - } + }) // Build the resolver. rWrapper, err := newCCResolverWrapper(cc, resolverBuilder) @@ -417,7 +395,7 @@ type connectivityStateManager struct { mu sync.Mutex state connectivity.State notifyChan chan struct{} - channelzID int64 + channelzID *channelz.Identifier } // updateState updates the connectivity.State of ClientConn. @@ -483,34 +461,36 @@ var _ ClientConnInterface = (*ClientConn)(nil) // handshakes. It also handles errors on established connections by // re-resolving the name and reconnecting. type ClientConn struct { - ctx context.Context - cancel context.CancelFunc - - target string - parsedTarget resolver.Target - authority string - dopts dialOptions - csMgr *connectivityStateManager - - balancerBuildOpts balancer.BuildOptions - blockingpicker *pickerWrapper - + ctx context.Context // Initialized using the background context at dial time. + cancel context.CancelFunc // Cancelled on close. 
+ + // The following are initialized at dial time, and are read-only after that. + target string // User's dial target. + parsedTarget resolver.Target // See parseTargetAndFindResolver(). + authority string // See determineAuthority(). + dopts dialOptions // Default and user specified dial options. + channelzID *channelz.Identifier // Channelz identifier for the channel. + balancerWrapper *ccBalancerWrapper // Uses gracefulswitch.balancer underneath. + + // The following provide their own synchronization, and therefore don't + // require cc.mu to be held to access them. + csMgr *connectivityStateManager + blockingpicker *pickerWrapper safeConfigSelector iresolver.SafeConfigSelector + czData *channelzData + retryThrottler atomic.Value // Updated from service config. - mu sync.RWMutex - resolverWrapper *ccResolverWrapper - sc *ServiceConfig - conns map[*addrConn]struct{} - // Keepalive parameter can be updated if a GoAway is received. - mkp keepalive.ClientParameters - curBalancerName string - balancerWrapper *ccBalancerWrapper - retryThrottler atomic.Value - + // firstResolveEvent is used to track whether the name resolver sent us at + // least one update. RPCs block on this event. firstResolveEvent *grpcsync.Event - channelzID int64 // channelz unique identification number - czData *channelzData + // mu protects the following fields. + // TODO: split mu so the same mutex isn't used for everything. + mu sync.RWMutex + resolverWrapper *ccResolverWrapper // Initialized in Dial; cleared in Close. + sc *ServiceConfig // Latest service config received from the resolver. + conns map[*addrConn]struct{} // Set to nil on close. + mkp keepalive.ClientParameters // May be updated upon receipt of a GoAway. lceMu sync.Mutex // protects lastConnectionError lastConnectionError error @@ -555,14 +535,7 @@ func (cc *ClientConn) GetState() connectivity.State { // Notice: This API is EXPERIMENTAL and may be changed or removed in a later // release. func (cc *ClientConn) Connect() { - cc.mu.Lock() - defer cc.mu.Unlock() - if cc.balancerWrapper != nil && cc.balancerWrapper.exitIdle() { - return - } - for ac := range cc.conns { - go ac.connect() - } + cc.balancerWrapper.exitIdle() } func (cc *ClientConn) scWatcher() { @@ -642,9 +615,7 @@ func (cc *ClientConn) updateResolverState(s resolver.State, err error) error { // with the new addresses. cc.maybeApplyDefaultServiceConfig(nil) - if cc.balancerWrapper != nil { - cc.balancerWrapper.resolverError(err) - } + cc.balancerWrapper.resolverError(err) // No addresses are valid with err set; return early. cc.mu.Unlock() @@ -652,7 +623,10 @@ func (cc *ClientConn) updateResolverState(s resolver.State, err error) error { } var ret error - if cc.dopts.disableServiceConfig || s.ServiceConfig == nil { + if cc.dopts.disableServiceConfig { + channelz.Infof(logger, cc.channelzID, "ignoring service config from resolver (%v) and applying the default because service config is disabled", s.ServiceConfig) + cc.maybeApplyDefaultServiceConfig(s.Addresses) + } else if s.ServiceConfig == nil { cc.maybeApplyDefaultServiceConfig(s.Addresses) // TODO: do we need to apply a failing LB policy if there is no // default, per the error handling design? 
@@ -669,16 +643,10 @@ func (cc *ClientConn) updateResolverState(s resolver.State, err error) error { cc.applyServiceConfigAndBalancer(sc, configSelector, s.Addresses) } else { ret = balancer.ErrBadResolverState - if cc.balancerWrapper == nil { - var err error - if s.ServiceConfig.Err != nil { - err = status.Errorf(codes.Unavailable, "error parsing service config: %v", s.ServiceConfig.Err) - } else { - err = status.Errorf(codes.Unavailable, "illegal service config type: %T", s.ServiceConfig.Config) - } - cc.safeConfigSelector.UpdateConfigSelector(&defaultConfigSelector{cc.sc}) - cc.blockingpicker.updatePicker(base.NewErrPicker(err)) - cc.csMgr.updateState(connectivity.TransientFailure) + if cc.sc == nil { + // Apply the failing LB only if we haven't received valid service config + // from the name resolver in the past. + cc.applyFailingLB(s.ServiceConfig) cc.mu.Unlock() return ret } @@ -686,24 +654,12 @@ func (cc *ClientConn) updateResolverState(s resolver.State, err error) error { } var balCfg serviceconfig.LoadBalancingConfig - if cc.dopts.balancerBuilder == nil && cc.sc != nil && cc.sc.lbConfig != nil { + if cc.sc != nil && cc.sc.lbConfig != nil { balCfg = cc.sc.lbConfig.cfg } - - cbn := cc.curBalancerName bw := cc.balancerWrapper cc.mu.Unlock() - if cbn != grpclbName { - // Filter any grpclb addresses since we don't have the grpclb balancer. - for i := 0; i < len(s.Addresses); { - if s.Addresses[i].Type == resolver.GRPCLB { - copy(s.Addresses[i:], s.Addresses[i+1:]) - s.Addresses = s.Addresses[:len(s.Addresses)-1] - continue - } - i++ - } - } + uccsErr := bw.updateClientConnState(&balancer.ClientConnState{ResolverState: s, BalancerConfig: balCfg}) if ret == nil { ret = uccsErr // prefer ErrBadResolver state since any other error is @@ -712,56 +668,28 @@ func (cc *ClientConn) updateResolverState(s resolver.State, err error) error { return ret } -// switchBalancer starts the switching from current balancer to the balancer -// with the given name. -// -// It will NOT send the current address list to the new balancer. If needed, -// caller of this function should send address list to the new balancer after -// this function returns. +// applyFailingLB is akin to configuring an LB policy on the channel which +// always fails RPCs. Here, an actual LB policy is not configured, but an always +// erroring picker is configured, which returns errors with information about +// what was invalid in the received service config. A config selector with no +// service config is configured, and the connectivity state of the channel is +// set to TransientFailure. // // Caller must hold cc.mu. -func (cc *ClientConn) switchBalancer(name string) { - if strings.EqualFold(cc.curBalancerName, name) { - return - } - - channelz.Infof(logger, cc.channelzID, "ClientConn switching balancer to %q", name) - if cc.dopts.balancerBuilder != nil { - channelz.Info(logger, cc.channelzID, "ignoring balancer switching: Balancer DialOption used instead") - return - } - if cc.balancerWrapper != nil { - // Don't hold cc.mu while closing the balancers. The balancers may call - // methods that require cc.mu (e.g. cc.NewSubConn()). Holding the mutex - // would cause a deadlock in that case. 
- cc.mu.Unlock() - cc.balancerWrapper.close() - cc.mu.Lock() - } - - builder := balancer.Get(name) - if builder == nil { - channelz.Warningf(logger, cc.channelzID, "Channel switches to new LB policy %q due to fallback from invalid balancer name", PickFirstBalancerName) - channelz.Infof(logger, cc.channelzID, "failed to get balancer builder for: %v, using pick_first instead", name) - builder = newPickfirstBuilder() +func (cc *ClientConn) applyFailingLB(sc *serviceconfig.ParseResult) { + var err error + if sc.Err != nil { + err = status.Errorf(codes.Unavailable, "error parsing service config: %v", sc.Err) } else { - channelz.Infof(logger, cc.channelzID, "Channel switches to new LB policy %q", name) + err = status.Errorf(codes.Unavailable, "illegal service config type: %T", sc.Config) } - - cc.curBalancerName = builder.Name() - cc.balancerWrapper = newCCBalancerWrapper(cc, builder, cc.balancerBuildOpts) + cc.safeConfigSelector.UpdateConfigSelector(&defaultConfigSelector{nil}) + cc.blockingpicker.updatePicker(base.NewErrPicker(err)) + cc.csMgr.updateState(connectivity.TransientFailure) } func (cc *ClientConn) handleSubConnStateChange(sc balancer.SubConn, s connectivity.State, err error) { - cc.mu.Lock() - if cc.conns == nil { - cc.mu.Unlock() - return - } - // TODO(bar switching) send updates to all balancer wrappers when balancer - // gracefully switching is supported. - cc.balancerWrapper.handleSubConnStateChange(sc, s, err) - cc.mu.Unlock() + cc.balancerWrapper.updateSubConnState(sc, s, err) } // newAddrConn creates an addrConn for addrs and adds it to cc.conns. @@ -784,17 +712,21 @@ func (cc *ClientConn) newAddrConn(addrs []resolver.Address, opts balancer.NewSub cc.mu.Unlock() return nil, ErrClientConnClosing } - if channelz.IsOn() { - ac.channelzID = channelz.RegisterSubChannel(ac, cc.channelzID, "") - channelz.AddTraceEvent(logger, ac.channelzID, 0, &channelz.TraceEventDesc{ - Desc: "Subchannel Created", - Severity: channelz.CtInfo, - Parent: &channelz.TraceEventDesc{ - Desc: fmt.Sprintf("Subchannel(id:%d) created", ac.channelzID), - Severity: channelz.CtInfo, - }, - }) + + var err error + ac.channelzID, err = channelz.RegisterSubChannel(ac, cc.channelzID, "") + if err != nil { + return nil, err } + channelz.AddTraceEvent(logger, ac.channelzID, 0, &channelz.TraceEventDesc{ + Desc: "Subchannel created", + Severity: channelz.CtInfo, + Parent: &channelz.TraceEventDesc{ + Desc: fmt.Sprintf("Subchannel(id:%d) created", ac.channelzID.Int()), + Severity: channelz.CtInfo, + }, + }) + cc.conns[ac] = struct{}{} cc.mu.Unlock() return ac, nil @@ -869,16 +801,31 @@ func (ac *addrConn) connect() error { return nil } +func equalAddresses(a, b []resolver.Address) bool { + if len(a) != len(b) { + return false + } + for i, v := range a { + if !v.Equal(b[i]) { + return false + } + } + return true +} + // tryUpdateAddrs tries to update ac.addrs with the new addresses list. // -// If ac is Connecting, it returns false. The caller should tear down the ac and -// create a new one. Note that the backoff will be reset when this happens. -// // If ac is TransientFailure, it updates ac.addrs and returns true. The updated // addresses will be picked up by retry in the next iteration after backoff. // // If ac is Shutdown or Idle, it updates ac.addrs and returns true. // +// If the addresses is the same as the old list, it does nothing and returns +// true. +// +// If ac is Connecting, it returns false. The caller should tear down the ac and +// create a new one. 
Note that the backoff will be reset when this happens. +// // If ac is Ready, it checks whether current connected address of ac is in the // new addrs list. // - If true, it updates ac.addrs and returns true. The ac will keep using @@ -895,6 +842,10 @@ func (ac *addrConn) tryUpdateAddrs(addrs []resolver.Address) bool { return true } + if equalAddresses(ac.addrs, addrs) { + return true + } + if ac.state == connectivity.Connecting { return false } @@ -902,10 +853,7 @@ func (ac *addrConn) tryUpdateAddrs(addrs []resolver.Address) bool { // ac.state is Ready, try to find the connected address. var curAddrFound bool for _, a := range addrs { - // a.ServerName takes precedent over ClientConn authority, if present. - if a.ServerName == "" { - a.ServerName = ac.cc.authority - } + a.ServerName = ac.cc.getServerName(a) if reflect.DeepEqual(ac.curAddr, a) { curAddrFound = true break @@ -919,6 +867,26 @@ func (ac *addrConn) tryUpdateAddrs(addrs []resolver.Address) bool { return curAddrFound } +// getServerName determines the serverName to be used in the connection +// handshake. The default value for the serverName is the authority on the +// ClientConn, which either comes from the user's dial target or through an +// authority override specified using the WithAuthority dial option. Name +// resolvers can specify a per-address override for the serverName through the +// resolver.Address.ServerName field which is used only if the WithAuthority +// dial option was not used. The rationale is that per-address authority +// overrides specified by the name resolver can represent a security risk, while +// an override specified by the user is more dependable since they probably know +// what they are doing. +func (cc *ClientConn) getServerName(addr resolver.Address) string { + if cc.dopts.authority != "" { + return cc.dopts.authority + } + if addr.ServerName != "" { + return addr.ServerName + } + return cc.authority +} + func getMethodConfig(sc *ServiceConfig, method string) MethodConfig { if sc == nil { return MethodConfig{} @@ -958,14 +926,10 @@ func (cc *ClientConn) healthCheckConfig() *healthCheckConfig { } func (cc *ClientConn) getTransport(ctx context.Context, failfast bool, method string) (transport.ClientTransport, func(balancer.DoneInfo), error) { - t, done, err := cc.blockingpicker.pick(ctx, failfast, balancer.PickInfo{ + return cc.blockingpicker.pick(ctx, failfast, balancer.PickInfo{ Ctx: ctx, FullMethodName: method, }) - if err != nil { - return nil, nil, toRPCErr(err) - } - return t, done, nil } func (cc *ClientConn) applyServiceConfigAndBalancer(sc *ServiceConfig, configSelector iresolver.ConfigSelector, addrs []resolver.Address) { @@ -990,35 +954,26 @@ func (cc *ClientConn) applyServiceConfigAndBalancer(sc *ServiceConfig, configSel cc.retryThrottler.Store((*retryThrottler)(nil)) } - if cc.dopts.balancerBuilder == nil { - // Only look at balancer types and switch balancer if balancer dial - // option is not set. 
- var newBalancerName string - if cc.sc != nil && cc.sc.lbConfig != nil { - newBalancerName = cc.sc.lbConfig.name - } else { - var isGRPCLB bool - for _, a := range addrs { - if a.Type == resolver.GRPCLB { - isGRPCLB = true - break - } - } - if isGRPCLB { - newBalancerName = grpclbName - } else if cc.sc != nil && cc.sc.LB != nil { - newBalancerName = *cc.sc.LB - } else { - newBalancerName = PickFirstBalancerName + var newBalancerName string + if cc.sc != nil && cc.sc.lbConfig != nil { + newBalancerName = cc.sc.lbConfig.name + } else { + var isGRPCLB bool + for _, a := range addrs { + if a.Type == resolver.GRPCLB { + isGRPCLB = true + break } } - cc.switchBalancer(newBalancerName) - } else if cc.balancerWrapper == nil { - // Balancer dial option was set, and this is the first time handling - // resolved addresses. Build a balancer with dopts.balancerBuilder. - cc.curBalancerName = cc.dopts.balancerBuilder.Name() - cc.balancerWrapper = newCCBalancerWrapper(cc, cc.dopts.balancerBuilder, cc.balancerBuildOpts) + if isGRPCLB { + newBalancerName = grpclbName + } else if cc.sc != nil && cc.sc.LB != nil { + newBalancerName = *cc.sc.LB + } else { + newBalancerName = PickFirstBalancerName + } } + cc.balancerWrapper.switchTo(newBalancerName) } func (cc *ClientConn) resolveNow(o resolver.ResolveNowOptions) { @@ -1069,11 +1024,11 @@ func (cc *ClientConn) Close() error { rWrapper := cc.resolverWrapper cc.resolverWrapper = nil bWrapper := cc.balancerWrapper - cc.balancerWrapper = nil cc.mu.Unlock() + // The order of closing matters here since the balancer wrapper assumes the + // picker is closed before it is closed. cc.blockingpicker.close() - if bWrapper != nil { bWrapper.close() } @@ -1084,22 +1039,22 @@ func (cc *ClientConn) Close() error { for ac := range conns { ac.tearDown(ErrClientConnClosing) } - if channelz.IsOn() { - ted := &channelz.TraceEventDesc{ - Desc: "Channel Deleted", + ted := &channelz.TraceEventDesc{ + Desc: "Channel deleted", + Severity: channelz.CtInfo, + } + if cc.dopts.channelzParentID != nil { + ted.Parent = &channelz.TraceEventDesc{ + Desc: fmt.Sprintf("Nested channel(id:%d) deleted", cc.channelzID.Int()), Severity: channelz.CtInfo, } - if cc.dopts.channelzParentID != 0 { - ted.Parent = &channelz.TraceEventDesc{ - Desc: fmt.Sprintf("Nested channel(id:%d) deleted", cc.channelzID), - Severity: channelz.CtInfo, - } - } - channelz.AddTraceEvent(logger, cc.channelzID, 0, ted) - // TraceEvent needs to be called before RemoveEntry, as TraceEvent may add trace reference to - // the entity being deleted, and thus prevent it from being deleted right away. - channelz.RemoveEntry(cc.channelzID) } + channelz.AddTraceEvent(logger, cc.channelzID, 0, ted) + // TraceEvent needs to be called before RemoveEntry, as TraceEvent may add + // trace reference to the entity being deleted, and thus prevent it from being + // deleted right away. + channelz.RemoveEntry(cc.channelzID) + return nil } @@ -1129,7 +1084,7 @@ type addrConn struct { backoffIdx int // Needs to be stateful for resetConnectBackoff. resetBackoff chan struct{} - channelzID int64 // channelz unique identification number. + channelzID *channelz.Identifier czData *channelzData } @@ -1275,11 +1230,7 @@ func (ac *addrConn) createTransport(addr resolver.Address, copts transport.Conne prefaceReceived := grpcsync.NewEvent() connClosed := grpcsync.NewEvent() - // addr.ServerName takes precedent over ClientConn authority, if present. 
- if addr.ServerName == "" { - addr.ServerName = ac.cc.authority - } - + addr.ServerName = ac.cc.getServerName(addr) hctx, hcancel := context.WithCancel(ac.ctx) hcStarted := false // protected by ac.mu @@ -1287,6 +1238,7 @@ func (ac *addrConn) createTransport(addr resolver.Address, copts transport.Conne ac.mu.Lock() defer ac.mu.Unlock() defer connClosed.Fire() + defer hcancel() if !hcStarted || hctx.Err() != nil { // We didn't start the health check or set the state to READY, so // no need to do anything else here. @@ -1297,7 +1249,6 @@ func (ac *addrConn) createTransport(addr resolver.Address, copts transport.Conne // state, since there may be a new transport in this addrConn. return } - hcancel() ac.transport = nil // Refresh the name resolver ac.cc.resolveNow(resolver.ResolveNowOptions{}) @@ -1315,14 +1266,13 @@ func (ac *addrConn) createTransport(addr resolver.Address, copts transport.Conne connectCtx, cancel := context.WithDeadline(ac.ctx, connectDeadline) defer cancel() - if channelz.IsOn() { - copts.ChannelzParentID = ac.channelzID - } + copts.ChannelzParentID = ac.channelzID newTr, err := transport.NewClientTransport(connectCtx, ac.cc.ctx, addr, copts, func() { prefaceReceived.Fire() }, onGoAway, onClose) if err != nil { // newTr is either nil, or closed. - channelz.Warningf(logger, ac.channelzID, "grpc: addrConn.createTransport failed to connect to %v. Err: %v", addr, err) + hcancel() + channelz.Warningf(logger, ac.channelzID, "grpc: addrConn.createTransport failed to connect to %s. Err: %v", addr, err) return err } @@ -1335,7 +1285,7 @@ func (ac *addrConn) createTransport(addr resolver.Address, copts transport.Conne newTr.Close(transport.ErrConnClosing) if connectCtx.Err() == context.DeadlineExceeded { err := errors.New("failed to receive server preface within timeout") - channelz.Warningf(logger, ac.channelzID, "grpc: addrConn.createTransport failed to connect to %v: %v", addr, err) + channelz.Warningf(logger, ac.channelzID, "grpc: addrConn.createTransport failed to connect to %s: %v", addr, err) return err } return nil @@ -1500,19 +1450,18 @@ func (ac *addrConn) tearDown(err error) { curTr.GracefulClose() ac.mu.Lock() } - if channelz.IsOn() { - channelz.AddTraceEvent(logger, ac.channelzID, 0, &channelz.TraceEventDesc{ - Desc: "Subchannel Deleted", + channelz.AddTraceEvent(logger, ac.channelzID, 0, &channelz.TraceEventDesc{ + Desc: "Subchannel deleted", + Severity: channelz.CtInfo, + Parent: &channelz.TraceEventDesc{ + Desc: fmt.Sprintf("Subchannel(id:%d) deleted", ac.channelzID.Int()), Severity: channelz.CtInfo, - Parent: &channelz.TraceEventDesc{ - Desc: fmt.Sprintf("Subchanel(id:%d) deleted", ac.channelzID), - Severity: channelz.CtInfo, - }, - }) - // TraceEvent needs to be called before RemoveEntry, as TraceEvent may add trace reference to - // the entity being deleted, and thus prevent it from being deleted right away. - channelz.RemoveEntry(ac.channelzID) - } + }, + }) + // TraceEvent needs to be called before RemoveEntry, as TraceEvent may add + // trace reference to the entity being deleted, and thus prevent it from + // being deleted right away. 
+ channelz.RemoveEntry(ac.channelzID) ac.mu.Unlock() } @@ -1621,3 +1570,114 @@ func (cc *ClientConn) connectionError() error { defer cc.lceMu.Unlock() return cc.lastConnectionError } + +func (cc *ClientConn) parseTargetAndFindResolver() (resolver.Builder, error) { + channelz.Infof(logger, cc.channelzID, "original dial target is: %q", cc.target) + + var rb resolver.Builder + parsedTarget, err := parseTarget(cc.target) + if err != nil { + channelz.Infof(logger, cc.channelzID, "dial target %q parse failed: %v", cc.target, err) + } else { + channelz.Infof(logger, cc.channelzID, "parsed dial target is: %+v", parsedTarget) + rb = cc.getResolver(parsedTarget.Scheme) + if rb != nil { + cc.parsedTarget = parsedTarget + return rb, nil + } + } + + // We are here because the user's dial target did not contain a scheme or + // specified an unregistered scheme. We should fallback to the default + // scheme, except when a custom dialer is specified in which case, we should + // always use passthrough scheme. + defScheme := resolver.GetDefaultScheme() + channelz.Infof(logger, cc.channelzID, "fallback to scheme %q", defScheme) + canonicalTarget := defScheme + ":///" + cc.target + + parsedTarget, err = parseTarget(canonicalTarget) + if err != nil { + channelz.Infof(logger, cc.channelzID, "dial target %q parse failed: %v", canonicalTarget, err) + return nil, err + } + channelz.Infof(logger, cc.channelzID, "parsed dial target is: %+v", parsedTarget) + rb = cc.getResolver(parsedTarget.Scheme) + if rb == nil { + return nil, fmt.Errorf("could not get resolver for default scheme: %q", parsedTarget.Scheme) + } + cc.parsedTarget = parsedTarget + return rb, nil +} + +// parseTarget uses RFC 3986 semantics to parse the given target into a +// resolver.Target struct containing scheme, authority and endpoint. Query +// params are stripped from the endpoint. +func parseTarget(target string) (resolver.Target, error) { + u, err := url.Parse(target) + if err != nil { + return resolver.Target{}, err + } + // For targets of the form "[scheme]://[authority]/endpoint, the endpoint + // value returned from url.Parse() contains a leading "/". Although this is + // in accordance with RFC 3986, we do not want to break existing resolver + // implementations which expect the endpoint without the leading "/". So, we + // end up stripping the leading "/" here. But this will result in an + // incorrect parsing for something like "unix:///path/to/socket". Since we + // own the "unix" resolver, we can workaround in the unix resolver by using + // the `URL` field instead of the `Endpoint` field. + endpoint := u.Path + if endpoint == "" { + endpoint = u.Opaque + } + endpoint = strings.TrimPrefix(endpoint, "/") + return resolver.Target{ + Scheme: u.Scheme, + Authority: u.Host, + Endpoint: endpoint, + URL: *u, + }, nil +} + +// Determine channel authority. The order of precedence is as follows: +// - user specified authority override using `WithAuthority` dial option +// - creds' notion of server name for the authentication handshake +// - endpoint from dial target of the form "scheme://[authority]/endpoint" +func determineAuthority(endpoint, target string, dopts dialOptions) (string, error) { + // Historically, we had two options for users to specify the serverName or + // authority for a channel. One was through the transport credentials + // (either in its constructor, or through the OverrideServerName() method). + // The other option (for cases where WithInsecure() dial option was used) + // was to use the WithAuthority() dial option. 
+ // + // A few things have changed since: + // - `insecure` package with an implementation of the `TransportCredentials` + // interface for the insecure case + // - WithAuthority() dial option support for secure credentials + authorityFromCreds := "" + if creds := dopts.copts.TransportCredentials; creds != nil && creds.Info().ServerName != "" { + authorityFromCreds = creds.Info().ServerName + } + authorityFromDialOption := dopts.authority + if (authorityFromCreds != "" && authorityFromDialOption != "") && authorityFromCreds != authorityFromDialOption { + return "", fmt.Errorf("ClientConn's authority from transport creds %q and dial option %q don't match", authorityFromCreds, authorityFromDialOption) + } + + switch { + case authorityFromDialOption != "": + return authorityFromDialOption, nil + case authorityFromCreds != "": + return authorityFromCreds, nil + case strings.HasPrefix(target, "unix:") || strings.HasPrefix(target, "unix-abstract:"): + // TODO: remove when the unix resolver implements optional interface to + // return channel authority. + return "localhost", nil + case strings.HasPrefix(endpoint, ":"): + return "localhost" + endpoint, nil + default: + // TODO: Define an optional interface on the resolver builder to return + // the channel authority given the user's dial target. For resolvers + // which don't implement this interface, we will use the endpoint from + // "scheme://authority/endpoint" as the default authority. + return endpoint, nil + } +} diff --git a/vendor/google.golang.org/grpc/credentials/credentials.go b/vendor/google.golang.org/grpc/credentials/credentials.go index 7eee7e4ec..96ff1877e 100644 --- a/vendor/google.golang.org/grpc/credentials/credentials.go +++ b/vendor/google.golang.org/grpc/credentials/credentials.go @@ -140,6 +140,11 @@ type TransportCredentials interface { // Additionally, ClientHandshakeInfo data will be available via the context // passed to this call. // + // The second argument to this method is the `:authority` header value used + // while creating new streams on this connection after authentication + // succeeds. Implementations must use this as the server name during the + // authentication handshake. + // // If the returned net.Conn is closed, it MUST close the net.Conn provided. ClientHandshake(context.Context, string, net.Conn) (net.Conn, AuthInfo, error) // ServerHandshake does the authentication handshake for servers. It returns @@ -153,9 +158,13 @@ type TransportCredentials interface { Info() ProtocolInfo // Clone makes a copy of this TransportCredentials. Clone() TransportCredentials - // OverrideServerName overrides the server name used to verify the hostname on the returned certificates from the server. - // gRPC internals also use it to override the virtual hosting name if it is set. - // It must be called before dialing. Currently, this is only used by grpclb. + // OverrideServerName specifies the value used for the following: + // - verifying the hostname on the returned certificates + // - as SNI in the client's handshake to support virtual hosting + // - as the value for `:authority` header at stream creation time + // + // Deprecated: use grpc.WithAuthority instead. Will be supported + // throughout 1.x. OverrideServerName(string) error } @@ -169,8 +178,18 @@ type TransportCredentials interface { // // This API is experimental. type Bundle interface { + // TransportCredentials returns the transport credentials from the Bundle. + // + // Implementations must return non-nil transport credentials. 
If transport + // security is not needed by the Bundle, implementations may choose to + // return insecure.NewCredentials(). TransportCredentials() TransportCredentials + + // PerRPCCredentials returns the per-RPC credentials from the Bundle. + // + // May be nil if per-RPC credentials are not needed. PerRPCCredentials() PerRPCCredentials + // NewWithMode should make a copy of Bundle, and switch mode. Modifying the // existing Bundle may cause races. // diff --git a/vendor/google.golang.org/grpc/credentials/insecure/insecure.go b/vendor/google.golang.org/grpc/credentials/insecure/insecure.go new file mode 100644 index 000000000..82bee1443 --- /dev/null +++ b/vendor/google.golang.org/grpc/credentials/insecure/insecure.go @@ -0,0 +1,98 @@ +/* + * + * Copyright 2020 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package insecure provides an implementation of the +// credentials.TransportCredentials interface which disables transport security. +package insecure + +import ( + "context" + "net" + + "google.golang.org/grpc/credentials" +) + +// NewCredentials returns a credentials which disables transport security. +// +// Note that using this credentials with per-RPC credentials which require +// transport security is incompatible and will cause grpc.Dial() to fail. +func NewCredentials() credentials.TransportCredentials { + return insecureTC{} +} + +// insecureTC implements the insecure transport credentials. The handshake +// methods simply return the passed in net.Conn and set the security level to +// NoSecurity. +type insecureTC struct{} + +func (insecureTC) ClientHandshake(ctx context.Context, _ string, conn net.Conn) (net.Conn, credentials.AuthInfo, error) { + return conn, info{credentials.CommonAuthInfo{SecurityLevel: credentials.NoSecurity}}, nil +} + +func (insecureTC) ServerHandshake(conn net.Conn) (net.Conn, credentials.AuthInfo, error) { + return conn, info{credentials.CommonAuthInfo{SecurityLevel: credentials.NoSecurity}}, nil +} + +func (insecureTC) Info() credentials.ProtocolInfo { + return credentials.ProtocolInfo{SecurityProtocol: "insecure"} +} + +func (insecureTC) Clone() credentials.TransportCredentials { + return insecureTC{} +} + +func (insecureTC) OverrideServerName(string) error { + return nil +} + +// info contains the auth information for an insecure connection. +// It implements the AuthInfo interface. +type info struct { + credentials.CommonAuthInfo +} + +// AuthType returns the type of info as a string. +func (info) AuthType() string { + return "insecure" +} + +// insecureBundle implements an insecure bundle. +// An insecure bundle provides a thin wrapper around insecureTC to support +// the credentials.Bundle interface. +type insecureBundle struct{} + +// NewBundle returns a bundle with disabled transport security and no per rpc credential. +func NewBundle() credentials.Bundle { + return insecureBundle{} +} + +// NewWithMode returns a new insecure Bundle. The mode is ignored. 
+func (insecureBundle) NewWithMode(string) (credentials.Bundle, error) { + return insecureBundle{}, nil +} + +// PerRPCCredentials returns an nil implementation as insecure +// bundle does not support a per rpc credential. +func (insecureBundle) PerRPCCredentials() credentials.PerRPCCredentials { + return nil +} + +// TransportCredentials returns the underlying insecure transport credential. +func (insecureBundle) TransportCredentials() credentials.TransportCredentials { + return NewCredentials() +} diff --git a/vendor/google.golang.org/grpc/dialoptions.go b/vendor/google.golang.org/grpc/dialoptions.go index 7a497237b..f2f605a17 100644 --- a/vendor/google.golang.org/grpc/dialoptions.go +++ b/vendor/google.golang.org/grpc/dialoptions.go @@ -20,16 +20,15 @@ package grpc import ( "context" - "fmt" "net" "time" "google.golang.org/grpc/backoff" - "google.golang.org/grpc/balancer" + "google.golang.org/grpc/channelz" "google.golang.org/grpc/credentials" + "google.golang.org/grpc/credentials/insecure" "google.golang.org/grpc/internal" internalbackoff "google.golang.org/grpc/internal/backoff" - "google.golang.org/grpc/internal/envconfig" "google.golang.org/grpc/internal/transport" "google.golang.org/grpc/keepalive" "google.golang.org/grpc/resolver" @@ -45,20 +44,17 @@ type dialOptions struct { chainUnaryInts []UnaryClientInterceptor chainStreamInts []StreamClientInterceptor - cp Compressor - dc Decompressor - bs internalbackoff.Strategy - block bool - returnLastError bool - insecure bool - timeout time.Duration - scChan <-chan ServiceConfig - authority string - copts transport.ConnectOptions - callOptions []CallOption - // This is used by WithBalancerName dial option. - balancerBuilder balancer.Builder - channelzParentID int64 + cp Compressor + dc Decompressor + bs internalbackoff.Strategy + block bool + returnLastError bool + timeout time.Duration + scChan <-chan ServiceConfig + authority string + copts transport.ConnectOptions + callOptions []CallOption + channelzParentID *channelz.Identifier disableServiceConfig bool disableRetry bool disableHealthCheck bool @@ -196,25 +192,6 @@ func WithDecompressor(dc Decompressor) DialOption { }) } -// WithBalancerName sets the balancer that the ClientConn will be initialized -// with. Balancer registered with balancerName will be used. This function -// panics if no balancer was registered by balancerName. -// -// The balancer cannot be overridden by balancer option specified by service -// config. -// -// Deprecated: use WithDefaultServiceConfig and WithDisableServiceConfig -// instead. Will be removed in a future 1.x release. -func WithBalancerName(balancerName string) DialOption { - builder := balancer.Get(balancerName) - if builder == nil { - panic(fmt.Sprintf("grpc.WithBalancerName: no balancer is registered for name %v", balancerName)) - } - return newFuncDialOption(func(o *dialOptions) { - o.balancerBuilder = builder - }) -} - // WithServiceConfig returns a DialOption which has a channel to read the // service configuration. // @@ -228,18 +205,14 @@ func WithServiceConfig(c <-chan ServiceConfig) DialOption { }) } -// WithConnectParams configures the dialer to use the provided ConnectParams. +// WithConnectParams configures the ClientConn to use the provided ConnectParams +// for creating and maintaining connections to servers. // // The backoff configuration specified as part of the ConnectParams overrides // all defaults specified in // https://github.com/grpc/grpc/blob/master/doc/connection-backoff.md. 
Consider // using the backoff.DefaultConfig as a base, in cases where you want to // override only a subset of the backoff configuration. -// -// Experimental -// -// Notice: This API is EXPERIMENTAL and may be changed or removed in a -// later release. func WithConnectParams(p ConnectParams) DialOption { return newFuncDialOption(func(o *dialOptions) { o.bs = internalbackoff.Exponential{Config: p.Backoff} @@ -277,7 +250,7 @@ func withBackoff(bs internalbackoff.Strategy) DialOption { }) } -// WithBlock returns a DialOption which makes caller of Dial blocks until the +// WithBlock returns a DialOption which makes callers of Dial block until the // underlying connection is up. Without this, Dial returns immediately and // connecting the server happens in background. func WithBlock() DialOption { @@ -303,11 +276,17 @@ func WithReturnConnectionError() DialOption { } // WithInsecure returns a DialOption which disables transport security for this -// ClientConn. Note that transport security is required unless WithInsecure is -// set. +// ClientConn. Under the hood, it uses insecure.NewCredentials(). +// +// Note that using this DialOption with per-RPC credentials (through +// WithCredentialsBundle or WithPerRPCCredentials) which require transport +// security is incompatible and will cause grpc.Dial() to fail. +// +// Deprecated: use WithTransportCredentials and insecure.NewCredentials() +// instead. Will be supported throughout 1.x. func WithInsecure() DialOption { return newFuncDialOption(func(o *dialOptions) { - o.insecure = true + o.copts.TransportCredentials = insecure.NewCredentials() }) } @@ -482,8 +461,7 @@ func WithChainStreamInterceptor(interceptors ...StreamClientInterceptor) DialOpt } // WithAuthority returns a DialOption that specifies the value to be used as the -// :authority pseudo-header. This value only works with WithInsecure and has no -// effect if TransportCredentials are present. +// :authority pseudo-header and as the server name in authentication handshake. func WithAuthority(a string) DialOption { return newFuncDialOption(func(o *dialOptions) { o.authority = a @@ -498,7 +476,7 @@ func WithAuthority(a string) DialOption { // // Notice: This API is EXPERIMENTAL and may be changed or removed in a // later release. -func WithChannelzParentID(id int64) DialOption { +func WithChannelzParentID(id *channelz.Identifier) DialOption { return newFuncDialOption(func(o *dialOptions) { o.channelzParentID = id }) @@ -519,14 +497,16 @@ func WithDisableServiceConfig() DialOption { // WithDefaultServiceConfig returns a DialOption that configures the default // service config, which will be used in cases where: // -// 1. WithDisableServiceConfig is also used. -// 2. Resolver does not return a service config or if the resolver returns an -// invalid service config. +// 1. WithDisableServiceConfig is also used, or // -// Experimental +// 2. The name resolver does not provide a service config or provides an +// invalid service config. // -// Notice: This API is EXPERIMENTAL and may be changed or removed in a -// later release. +// The parameter s is the JSON representation of the default service config. 
+// For more information about service configs, see: +// https://github.com/grpc/grpc/blob/master/doc/service_config.md +// For a simple example of usage, see: +// examples/features/load_balancing/client/main.go func WithDefaultServiceConfig(s string) DialOption { return newFuncDialOption(func(o *dialOptions) { o.defaultServiceConfigRawJSON = &s @@ -538,14 +518,8 @@ func WithDefaultServiceConfig(s string) DialOption { // will happen automatically if no data is written to the wire or if the RPC is // unprocessed by the remote server. // -// Retry support is currently disabled by default, but will be enabled by -// default in the future. Until then, it may be enabled by setting the -// environment variable "GRPC_GO_RETRY" to "on". -// -// Experimental -// -// Notice: This API is EXPERIMENTAL and may be changed or removed in a -// later release. +// Retry support is currently enabled by default, but may be disabled by +// setting the environment variable "GRPC_GO_RETRY" to "off". func WithDisableRetry() DialOption { return newFuncDialOption(func(o *dialOptions) { o.disableRetry = true @@ -585,7 +559,6 @@ func withHealthCheckFunc(f internal.HealthChecker) DialOption { func defaultDialOptions() dialOptions { return dialOptions{ - disableRetry: !envconfig.Retry, healthCheckFunc: internal.HealthCheckFunc, copts: transport.ConnectOptions{ WriteBufferSize: defaultWriteBufSize, diff --git a/vendor/google.golang.org/grpc/encoding/encoding.go b/vendor/google.golang.org/grpc/encoding/encoding.go index 6d84f74c7..18e530fc9 100644 --- a/vendor/google.golang.org/grpc/encoding/encoding.go +++ b/vendor/google.golang.org/grpc/encoding/encoding.go @@ -108,7 +108,7 @@ var registeredCodecs = make(map[string]Codec) // more details. // // NOTE: this function must only be called during initialization time (i.e. in -// an init() function), and is not thread-safe. If multiple Compressors are +// an init() function), and is not thread-safe. If multiple Codecs are // registered with the same name, the one registered last will take effect. func RegisterCodec(codec Codec) { if codec == nil { diff --git a/vendor/google.golang.org/grpc/grpclog/loggerv2.go b/vendor/google.golang.org/grpc/grpclog/loggerv2.go index 4ee33171e..7c1f66409 100644 --- a/vendor/google.golang.org/grpc/grpclog/loggerv2.go +++ b/vendor/google.golang.org/grpc/grpclog/loggerv2.go @@ -19,11 +19,14 @@ package grpclog import ( + "encoding/json" + "fmt" "io" "io/ioutil" "log" "os" "strconv" + "strings" "google.golang.org/grpc/internal/grpclog" ) @@ -95,8 +98,9 @@ var severityName = []string{ // loggerT is the default logger used by grpclog. type loggerT struct { - m []*log.Logger - v int + m []*log.Logger + v int + jsonFormat bool } // NewLoggerV2 creates a loggerV2 with the provided writers. @@ -105,19 +109,32 @@ type loggerT struct { // Warning logs will be written to warningW and infoW. // Info logs will be written to infoW. func NewLoggerV2(infoW, warningW, errorW io.Writer) LoggerV2 { - return NewLoggerV2WithVerbosity(infoW, warningW, errorW, 0) + return newLoggerV2WithConfig(infoW, warningW, errorW, loggerV2Config{}) } // NewLoggerV2WithVerbosity creates a loggerV2 with the provided writers and // verbosity level. 
func NewLoggerV2WithVerbosity(infoW, warningW, errorW io.Writer, v int) LoggerV2 { + return newLoggerV2WithConfig(infoW, warningW, errorW, loggerV2Config{verbose: v}) +} + +type loggerV2Config struct { + verbose int + jsonFormat bool +} + +func newLoggerV2WithConfig(infoW, warningW, errorW io.Writer, c loggerV2Config) LoggerV2 { var m []*log.Logger - m = append(m, log.New(infoW, severityName[infoLog]+": ", log.LstdFlags)) - m = append(m, log.New(io.MultiWriter(infoW, warningW), severityName[warningLog]+": ", log.LstdFlags)) + flag := log.LstdFlags + if c.jsonFormat { + flag = 0 + } + m = append(m, log.New(infoW, "", flag)) + m = append(m, log.New(io.MultiWriter(infoW, warningW), "", flag)) ew := io.MultiWriter(infoW, warningW, errorW) // ew will be used for error and fatal. - m = append(m, log.New(ew, severityName[errorLog]+": ", log.LstdFlags)) - m = append(m, log.New(ew, severityName[fatalLog]+": ", log.LstdFlags)) - return &loggerT{m: m, v: v} + m = append(m, log.New(ew, "", flag)) + m = append(m, log.New(ew, "", flag)) + return &loggerT{m: m, v: c.verbose, jsonFormat: c.jsonFormat} } // newLoggerV2 creates a loggerV2 to be used as default logger. @@ -142,58 +159,79 @@ func newLoggerV2() LoggerV2 { if vl, err := strconv.Atoi(vLevel); err == nil { v = vl } - return NewLoggerV2WithVerbosity(infoW, warningW, errorW, v) + + jsonFormat := strings.EqualFold(os.Getenv("GRPC_GO_LOG_FORMATTER"), "json") + + return newLoggerV2WithConfig(infoW, warningW, errorW, loggerV2Config{ + verbose: v, + jsonFormat: jsonFormat, + }) +} + +func (g *loggerT) output(severity int, s string) { + sevStr := severityName[severity] + if !g.jsonFormat { + g.m[severity].Output(2, fmt.Sprintf("%v: %v", sevStr, s)) + return + } + // TODO: we can also include the logging component, but that needs more + // (API) changes. + b, _ := json.Marshal(map[string]string{ + "severity": sevStr, + "message": s, + }) + g.m[severity].Output(2, string(b)) } func (g *loggerT) Info(args ...interface{}) { - g.m[infoLog].Print(args...) + g.output(infoLog, fmt.Sprint(args...)) } func (g *loggerT) Infoln(args ...interface{}) { - g.m[infoLog].Println(args...) + g.output(infoLog, fmt.Sprintln(args...)) } func (g *loggerT) Infof(format string, args ...interface{}) { - g.m[infoLog].Printf(format, args...) + g.output(infoLog, fmt.Sprintf(format, args...)) } func (g *loggerT) Warning(args ...interface{}) { - g.m[warningLog].Print(args...) + g.output(warningLog, fmt.Sprint(args...)) } func (g *loggerT) Warningln(args ...interface{}) { - g.m[warningLog].Println(args...) + g.output(warningLog, fmt.Sprintln(args...)) } func (g *loggerT) Warningf(format string, args ...interface{}) { - g.m[warningLog].Printf(format, args...) + g.output(warningLog, fmt.Sprintf(format, args...)) } func (g *loggerT) Error(args ...interface{}) { - g.m[errorLog].Print(args...) + g.output(errorLog, fmt.Sprint(args...)) } func (g *loggerT) Errorln(args ...interface{}) { - g.m[errorLog].Println(args...) + g.output(errorLog, fmt.Sprintln(args...)) } func (g *loggerT) Errorf(format string, args ...interface{}) { - g.m[errorLog].Printf(format, args...) + g.output(errorLog, fmt.Sprintf(format, args...)) } func (g *loggerT) Fatal(args ...interface{}) { - g.m[fatalLog].Fatal(args...) - // No need to call os.Exit() again because log.Logger.Fatal() calls os.Exit(). + g.output(fatalLog, fmt.Sprint(args...)) + os.Exit(1) } func (g *loggerT) Fatalln(args ...interface{}) { - g.m[fatalLog].Fatalln(args...) - // No need to call os.Exit() again because log.Logger.Fatal() calls os.Exit(). 
+ g.output(fatalLog, fmt.Sprintln(args...)) + os.Exit(1) } func (g *loggerT) Fatalf(format string, args ...interface{}) { - g.m[fatalLog].Fatalf(format, args...) - // No need to call os.Exit() again because log.Logger.Fatal() calls os.Exit(). + g.output(fatalLog, fmt.Sprintf(format, args...)) + os.Exit(1) } func (g *loggerT) V(l int) bool { @@ -210,12 +248,12 @@ func (g *loggerT) V(l int) bool { // later release. type DepthLoggerV2 interface { LoggerV2 - // InfoDepth logs to INFO log at the specified depth. Arguments are handled in the manner of fmt.Print. + // InfoDepth logs to INFO log at the specified depth. Arguments are handled in the manner of fmt.Println. InfoDepth(depth int, args ...interface{}) - // WarningDepth logs to WARNING log at the specified depth. Arguments are handled in the manner of fmt.Print. + // WarningDepth logs to WARNING log at the specified depth. Arguments are handled in the manner of fmt.Println. WarningDepth(depth int, args ...interface{}) - // ErrorDetph logs to ERROR log at the specified depth. Arguments are handled in the manner of fmt.Print. + // ErrorDepth logs to ERROR log at the specified depth. Arguments are handled in the manner of fmt.Println. ErrorDepth(depth int, args ...interface{}) - // FatalDepth logs to FATAL log at the specified depth. Arguments are handled in the manner of fmt.Print. + // FatalDepth logs to FATAL log at the specified depth. Arguments are handled in the manner of fmt.Println. FatalDepth(depth int, args ...interface{}) } diff --git a/vendor/google.golang.org/grpc/interceptor.go b/vendor/google.golang.org/grpc/interceptor.go index 668e0adcf..bb96ef57b 100644 --- a/vendor/google.golang.org/grpc/interceptor.go +++ b/vendor/google.golang.org/grpc/interceptor.go @@ -72,9 +72,12 @@ type UnaryServerInfo struct { } // UnaryHandler defines the handler invoked by UnaryServerInterceptor to complete the normal -// execution of a unary RPC. If a UnaryHandler returns an error, it should be produced by the -// status package, or else gRPC will use codes.Unknown as the status code and err.Error() as -// the status message of the RPC. +// execution of a unary RPC. +// +// If a UnaryHandler returns an error, it should either be produced by the +// status package, or be one of the context errors. Otherwise, gRPC will use +// codes.Unknown as the status code and err.Error() as the status message of the +// RPC. type UnaryHandler func(ctx context.Context, req interface{}) (interface{}, error) // UnaryServerInterceptor provides a hook to intercept the execution of a unary RPC on the server. info diff --git a/vendor/google.golang.org/grpc/internal/balancer/gracefulswitch/gracefulswitch.go b/vendor/google.golang.org/grpc/internal/balancer/gracefulswitch/gracefulswitch.go new file mode 100644 index 000000000..7ba8f4d18 --- /dev/null +++ b/vendor/google.golang.org/grpc/internal/balancer/gracefulswitch/gracefulswitch.go @@ -0,0 +1,382 @@ +/* + * + * Copyright 2022 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + */ + +// Package gracefulswitch implements a graceful switch load balancer. +package gracefulswitch + +import ( + "errors" + "fmt" + "sync" + + "google.golang.org/grpc/balancer" + "google.golang.org/grpc/balancer/base" + "google.golang.org/grpc/connectivity" + "google.golang.org/grpc/resolver" +) + +var errBalancerClosed = errors.New("gracefulSwitchBalancer is closed") +var _ balancer.Balancer = (*Balancer)(nil) + +// NewBalancer returns a graceful switch Balancer. +func NewBalancer(cc balancer.ClientConn, opts balancer.BuildOptions) *Balancer { + return &Balancer{ + cc: cc, + bOpts: opts, + } +} + +// Balancer is a utility to gracefully switch from one balancer to +// a new balancer. It implements the balancer.Balancer interface. +type Balancer struct { + bOpts balancer.BuildOptions + cc balancer.ClientConn + + // mu protects the following fields and all fields within balancerCurrent + // and balancerPending. mu does not need to be held when calling into the + // child balancers, as all calls into these children happen only as a direct + // result of a call into the gracefulSwitchBalancer, which are also + // guaranteed to be synchronous. There is one exception: an UpdateState call + // from a child balancer when current and pending are populated can lead to + // calling Close() on the current. To prevent that racing with an + // UpdateSubConnState from the channel, we hold currentMu during Close and + // UpdateSubConnState calls. + mu sync.Mutex + balancerCurrent *balancerWrapper + balancerPending *balancerWrapper + closed bool // set to true when this balancer is closed + + // currentMu must be locked before mu. This mutex guards against this + // sequence of events: UpdateSubConnState() called, finds the + // balancerCurrent, gives up lock, updateState comes in, causes Close() on + // balancerCurrent before the UpdateSubConnState is called on the + // balancerCurrent. + currentMu sync.Mutex +} + +// swap swaps out the current lb with the pending lb and updates the ClientConn. +// The caller must hold gsb.mu. +func (gsb *Balancer) swap() { + gsb.cc.UpdateState(gsb.balancerPending.lastState) + cur := gsb.balancerCurrent + gsb.balancerCurrent = gsb.balancerPending + gsb.balancerPending = nil + go func() { + gsb.currentMu.Lock() + defer gsb.currentMu.Unlock() + cur.Close() + }() +} + +// Helper function that checks if the balancer passed in is current or pending. +// The caller must hold gsb.mu. +func (gsb *Balancer) balancerCurrentOrPending(bw *balancerWrapper) bool { + return bw == gsb.balancerCurrent || bw == gsb.balancerPending +} + +// SwitchTo initializes the graceful switch process, which completes based on +// connectivity state changes on the current/pending balancer. Thus, the switch +// process is not complete when this method returns. This method must be called +// synchronously alongside the rest of the balancer.Balancer methods this +// Graceful Switch Balancer implements. 
+func (gsb *Balancer) SwitchTo(builder balancer.Builder) error { + gsb.mu.Lock() + if gsb.closed { + gsb.mu.Unlock() + return errBalancerClosed + } + bw := &balancerWrapper{ + gsb: gsb, + lastState: balancer.State{ + ConnectivityState: connectivity.Connecting, + Picker: base.NewErrPicker(balancer.ErrNoSubConnAvailable), + }, + subconns: make(map[balancer.SubConn]bool), + } + balToClose := gsb.balancerPending // nil if there is no pending balancer + if gsb.balancerCurrent == nil { + gsb.balancerCurrent = bw + } else { + gsb.balancerPending = bw + } + gsb.mu.Unlock() + balToClose.Close() + // This function takes a builder instead of a balancer because builder.Build + // can call back inline, and this utility needs to handle the callbacks. + newBalancer := builder.Build(bw, gsb.bOpts) + if newBalancer == nil { + // This is illegal and should never happen; we clear the balancerWrapper + // we were constructing if it happens to avoid a potential panic. + gsb.mu.Lock() + if gsb.balancerPending != nil { + gsb.balancerPending = nil + } else { + gsb.balancerCurrent = nil + } + gsb.mu.Unlock() + return balancer.ErrBadResolverState + } + + // This write doesn't need to take gsb.mu because this field never gets read + // or written to on any calls from the current or pending. Calls from grpc + // to this balancer are guaranteed to be called synchronously, so this + // bw.Balancer field will never be forwarded to until this SwitchTo() + // function returns. + bw.Balancer = newBalancer + return nil +} + +// Returns nil if the graceful switch balancer is closed. +func (gsb *Balancer) latestBalancer() *balancerWrapper { + gsb.mu.Lock() + defer gsb.mu.Unlock() + if gsb.balancerPending != nil { + return gsb.balancerPending + } + return gsb.balancerCurrent +} + +// UpdateClientConnState forwards the update to the latest balancer created. +func (gsb *Balancer) UpdateClientConnState(state balancer.ClientConnState) error { + // The resolver data is only relevant to the most recent LB Policy. + balToUpdate := gsb.latestBalancer() + if balToUpdate == nil { + return errBalancerClosed + } + // Perform this call without gsb.mu to prevent deadlocks if the child calls + // back into the channel. The latest balancer can never be closed during a + // call from the channel, even without gsb.mu held. + return balToUpdate.UpdateClientConnState(state) +} + +// ResolverError forwards the error to the latest balancer created. +func (gsb *Balancer) ResolverError(err error) { + // The resolver data is only relevant to the most recent LB Policy. + balToUpdate := gsb.latestBalancer() + if balToUpdate == nil { + return + } + // Perform this call without gsb.mu to prevent deadlocks if the child calls + // back into the channel. The latest balancer can never be closed during a + // call from the channel, even without gsb.mu held. + balToUpdate.ResolverError(err) +} + +// ExitIdle forwards the call to the latest balancer created. +// +// If the latest balancer does not support ExitIdle, the subConns are +// re-connected to manually. +func (gsb *Balancer) ExitIdle() { + balToUpdate := gsb.latestBalancer() + if balToUpdate == nil { + return + } + // There is no need to protect this read with a mutex, as the write to the + // Balancer field happens in SwitchTo, which completes before this can be + // called. + if ei, ok := balToUpdate.Balancer.(balancer.ExitIdler); ok { + ei.ExitIdle() + return + } + for sc := range balToUpdate.subconns { + sc.Connect() + } +} + +// UpdateSubConnState forwards the update to the appropriate child. 
+func (gsb *Balancer) UpdateSubConnState(sc balancer.SubConn, state balancer.SubConnState) { + gsb.currentMu.Lock() + defer gsb.currentMu.Unlock() + gsb.mu.Lock() + // Forward update to the appropriate child. Even if there is a pending + // balancer, the current balancer should continue to get SubConn updates to + // maintain the proper state while the pending is still connecting. + var balToUpdate *balancerWrapper + if gsb.balancerCurrent != nil && gsb.balancerCurrent.subconns[sc] { + balToUpdate = gsb.balancerCurrent + } else if gsb.balancerPending != nil && gsb.balancerPending.subconns[sc] { + balToUpdate = gsb.balancerPending + } + gsb.mu.Unlock() + if balToUpdate == nil { + // SubConn belonged to a stale lb policy that has not yet fully closed, + // or the balancer was already closed. + return + } + balToUpdate.UpdateSubConnState(sc, state) +} + +// Close closes any active child balancers. +func (gsb *Balancer) Close() { + gsb.mu.Lock() + gsb.closed = true + currentBalancerToClose := gsb.balancerCurrent + gsb.balancerCurrent = nil + pendingBalancerToClose := gsb.balancerPending + gsb.balancerPending = nil + gsb.mu.Unlock() + + currentBalancerToClose.Close() + pendingBalancerToClose.Close() +} + +// balancerWrapper wraps a balancer.Balancer, and overrides some Balancer +// methods to help cleanup SubConns created by the wrapped balancer. +// +// It implements the balancer.ClientConn interface and is passed down in that +// capacity to the wrapped balancer. It maintains a set of subConns created by +// the wrapped balancer and calls from the latter to create/update/remove +// SubConns update this set before being forwarded to the parent ClientConn. +// State updates from the wrapped balancer can result in invocation of the +// graceful switch logic. +type balancerWrapper struct { + balancer.Balancer + gsb *Balancer + + lastState balancer.State + subconns map[balancer.SubConn]bool // subconns created by this balancer +} + +func (bw *balancerWrapper) UpdateSubConnState(sc balancer.SubConn, state balancer.SubConnState) { + if state.ConnectivityState == connectivity.Shutdown { + bw.gsb.mu.Lock() + delete(bw.subconns, sc) + bw.gsb.mu.Unlock() + } + // There is no need to protect this read with a mutex, as the write to the + // Balancer field happens in SwitchTo, which completes before this can be + // called. + bw.Balancer.UpdateSubConnState(sc, state) +} + +// Close closes the underlying LB policy and removes the subconns it created. bw +// must not be referenced via balancerCurrent or balancerPending in gsb when +// called. gsb.mu must not be held. Does not panic with a nil receiver. +func (bw *balancerWrapper) Close() { + // before Close is called. + if bw == nil { + return + } + // There is no need to protect this read with a mutex, as Close() is + // impossible to be called concurrently with the write in SwitchTo(). The + // callsites of Close() for this balancer in Graceful Switch Balancer will + // never be called until SwitchTo() returns. + bw.Balancer.Close() + bw.gsb.mu.Lock() + for sc := range bw.subconns { + bw.gsb.cc.RemoveSubConn(sc) + } + bw.gsb.mu.Unlock() +} + +func (bw *balancerWrapper) UpdateState(state balancer.State) { + // Hold the mutex for this entire call to ensure it cannot occur + // concurrently with other updateState() calls. This causes updates to + // lastState and calls to cc.UpdateState to happen atomically. 
+ bw.gsb.mu.Lock() + defer bw.gsb.mu.Unlock() + bw.lastState = state + + if !bw.gsb.balancerCurrentOrPending(bw) { + return + } + + if bw == bw.gsb.balancerCurrent { + // In the case that the current balancer exits READY, and there is a pending + // balancer, you can forward the pending balancer's cached State up to + // ClientConn and swap the pending into the current. This is because there + // is no reason to gracefully switch from and keep using the old policy as + // the ClientConn is not connected to any backends. + if state.ConnectivityState != connectivity.Ready && bw.gsb.balancerPending != nil { + bw.gsb.swap() + return + } + // Even if there is a pending balancer waiting to be gracefully switched to, + // continue to forward current balancer updates to the Client Conn. Ignoring + // state + picker from the current would cause undefined behavior/cause the + // system to behave incorrectly from the current LB policies perspective. + // Also, the current LB is still being used by grpc to choose SubConns per + // RPC, and thus should use the most updated form of the current balancer. + bw.gsb.cc.UpdateState(state) + return + } + // This method is now dealing with a state update from the pending balancer. + // If the current balancer is currently in a state other than READY, the new + // policy can be swapped into place immediately. This is because there is no + // reason to gracefully switch from and keep using the old policy as the + // ClientConn is not connected to any backends. + if state.ConnectivityState != connectivity.Connecting || bw.gsb.balancerCurrent.lastState.ConnectivityState != connectivity.Ready { + bw.gsb.swap() + } +} + +func (bw *balancerWrapper) NewSubConn(addrs []resolver.Address, opts balancer.NewSubConnOptions) (balancer.SubConn, error) { + bw.gsb.mu.Lock() + if !bw.gsb.balancerCurrentOrPending(bw) { + bw.gsb.mu.Unlock() + return nil, fmt.Errorf("%T at address %p that called NewSubConn is deleted", bw, bw) + } + bw.gsb.mu.Unlock() + + sc, err := bw.gsb.cc.NewSubConn(addrs, opts) + if err != nil { + return nil, err + } + bw.gsb.mu.Lock() + if !bw.gsb.balancerCurrentOrPending(bw) { // balancer was closed during this call + bw.gsb.cc.RemoveSubConn(sc) + bw.gsb.mu.Unlock() + return nil, fmt.Errorf("%T at address %p that called NewSubConn is deleted", bw, bw) + } + bw.subconns[sc] = true + bw.gsb.mu.Unlock() + return sc, nil +} + +func (bw *balancerWrapper) ResolveNow(opts resolver.ResolveNowOptions) { + // Ignore ResolveNow requests from anything other than the most recent + // balancer, because older balancers were already removed from the config. 
+ if bw != bw.gsb.latestBalancer() { + return + } + bw.gsb.cc.ResolveNow(opts) +} + +func (bw *balancerWrapper) RemoveSubConn(sc balancer.SubConn) { + bw.gsb.mu.Lock() + if !bw.gsb.balancerCurrentOrPending(bw) { + bw.gsb.mu.Unlock() + return + } + bw.gsb.mu.Unlock() + bw.gsb.cc.RemoveSubConn(sc) +} + +func (bw *balancerWrapper) UpdateAddresses(sc balancer.SubConn, addrs []resolver.Address) { + bw.gsb.mu.Lock() + if !bw.gsb.balancerCurrentOrPending(bw) { + bw.gsb.mu.Unlock() + return + } + bw.gsb.mu.Unlock() + bw.gsb.cc.UpdateAddresses(sc, addrs) +} + +func (bw *balancerWrapper) Target() string { + return bw.gsb.cc.Target() +} diff --git a/vendor/google.golang.org/grpc/internal/binarylog/binarylog.go b/vendor/google.golang.org/grpc/internal/binarylog/binarylog.go index 5cc3aeddb..0a25ce43f 100644 --- a/vendor/google.golang.org/grpc/internal/binarylog/binarylog.go +++ b/vendor/google.golang.org/grpc/internal/binarylog/binarylog.go @@ -31,7 +31,7 @@ import ( // Logger is the global binary logger. It can be used to get binary logger for // each method. type Logger interface { - getMethodLogger(methodName string) *MethodLogger + GetMethodLogger(methodName string) MethodLogger } // binLogger is the global binary logger for the binary. One of this should be @@ -49,17 +49,24 @@ func SetLogger(l Logger) { binLogger = l } +// GetLogger gets the binarg logger. +// +// Only call this at init time. +func GetLogger() Logger { + return binLogger +} + // GetMethodLogger returns the methodLogger for the given methodName. // // methodName should be in the format of "/service/method". // // Each methodLogger returned by this method is a new instance. This is to // generate sequence id within the call. -func GetMethodLogger(methodName string) *MethodLogger { +func GetMethodLogger(methodName string) MethodLogger { if binLogger == nil { return nil } - return binLogger.getMethodLogger(methodName) + return binLogger.GetMethodLogger(methodName) } func init() { @@ -68,17 +75,29 @@ func init() { binLogger = NewLoggerFromConfigString(configStr) } -type methodLoggerConfig struct { +// MethodLoggerConfig contains the setting for logging behavior of a method +// logger. Currently, it contains the max length of header and message. +type MethodLoggerConfig struct { // Max length of header and message. - hdr, msg uint64 + Header, Message uint64 +} + +// LoggerConfig contains the config for loggers to create method loggers. +type LoggerConfig struct { + All *MethodLoggerConfig + Services map[string]*MethodLoggerConfig + Methods map[string]*MethodLoggerConfig + + Blacklist map[string]struct{} } type logger struct { - all *methodLoggerConfig - services map[string]*methodLoggerConfig - methods map[string]*methodLoggerConfig + config LoggerConfig +} - blacklist map[string]struct{} +// NewLoggerFromConfig builds a logger with the given LoggerConfig. +func NewLoggerFromConfig(config LoggerConfig) Logger { + return &logger{config: config} } // newEmptyLogger creates an empty logger. The map fields need to be filled in @@ -88,57 +107,57 @@ func newEmptyLogger() *logger { } // Set method logger for "*". -func (l *logger) setDefaultMethodLogger(ml *methodLoggerConfig) error { - if l.all != nil { +func (l *logger) setDefaultMethodLogger(ml *MethodLoggerConfig) error { + if l.config.All != nil { return fmt.Errorf("conflicting global rules found") } - l.all = ml + l.config.All = ml return nil } // Set method logger for "service/*". // // New methodLogger with same service overrides the old one. 
-func (l *logger) setServiceMethodLogger(service string, ml *methodLoggerConfig) error { - if _, ok := l.services[service]; ok { +func (l *logger) setServiceMethodLogger(service string, ml *MethodLoggerConfig) error { + if _, ok := l.config.Services[service]; ok { return fmt.Errorf("conflicting service rules for service %v found", service) } - if l.services == nil { - l.services = make(map[string]*methodLoggerConfig) + if l.config.Services == nil { + l.config.Services = make(map[string]*MethodLoggerConfig) } - l.services[service] = ml + l.config.Services[service] = ml return nil } // Set method logger for "service/method". // // New methodLogger with same method overrides the old one. -func (l *logger) setMethodMethodLogger(method string, ml *methodLoggerConfig) error { - if _, ok := l.blacklist[method]; ok { +func (l *logger) setMethodMethodLogger(method string, ml *MethodLoggerConfig) error { + if _, ok := l.config.Blacklist[method]; ok { return fmt.Errorf("conflicting blacklist rules for method %v found", method) } - if _, ok := l.methods[method]; ok { + if _, ok := l.config.Methods[method]; ok { return fmt.Errorf("conflicting method rules for method %v found", method) } - if l.methods == nil { - l.methods = make(map[string]*methodLoggerConfig) + if l.config.Methods == nil { + l.config.Methods = make(map[string]*MethodLoggerConfig) } - l.methods[method] = ml + l.config.Methods[method] = ml return nil } // Set blacklist method for "-service/method". func (l *logger) setBlacklist(method string) error { - if _, ok := l.blacklist[method]; ok { + if _, ok := l.config.Blacklist[method]; ok { return fmt.Errorf("conflicting blacklist rules for method %v found", method) } - if _, ok := l.methods[method]; ok { + if _, ok := l.config.Methods[method]; ok { return fmt.Errorf("conflicting method rules for method %v found", method) } - if l.blacklist == nil { - l.blacklist = make(map[string]struct{}) + if l.config.Blacklist == nil { + l.config.Blacklist = make(map[string]struct{}) } - l.blacklist[method] = struct{}{} + l.config.Blacklist[method] = struct{}{} return nil } @@ -148,23 +167,23 @@ func (l *logger) setBlacklist(method string) error { // // Each methodLogger returned by this method is a new instance. This is to // generate sequence id within the call. 
-func (l *logger) getMethodLogger(methodName string) *MethodLogger { +func (l *logger) GetMethodLogger(methodName string) MethodLogger { s, m, err := grpcutil.ParseMethod(methodName) if err != nil { grpclogLogger.Infof("binarylogging: failed to parse %q: %v", methodName, err) return nil } - if ml, ok := l.methods[s+"/"+m]; ok { - return newMethodLogger(ml.hdr, ml.msg) + if ml, ok := l.config.Methods[s+"/"+m]; ok { + return newMethodLogger(ml.Header, ml.Message) } - if _, ok := l.blacklist[s+"/"+m]; ok { + if _, ok := l.config.Blacklist[s+"/"+m]; ok { return nil } - if ml, ok := l.services[s]; ok { - return newMethodLogger(ml.hdr, ml.msg) + if ml, ok := l.config.Services[s]; ok { + return newMethodLogger(ml.Header, ml.Message) } - if l.all == nil { + if l.config.All == nil { return nil } - return newMethodLogger(l.all.hdr, l.all.msg) + return newMethodLogger(l.config.All.Header, l.config.All.Message) } diff --git a/vendor/google.golang.org/grpc/internal/binarylog/env_config.go b/vendor/google.golang.org/grpc/internal/binarylog/env_config.go index d8f4e7602..ab589a76b 100644 --- a/vendor/google.golang.org/grpc/internal/binarylog/env_config.go +++ b/vendor/google.golang.org/grpc/internal/binarylog/env_config.go @@ -89,7 +89,7 @@ func (l *logger) fillMethodLoggerWithConfigString(config string) error { if err != nil { return fmt.Errorf("invalid config: %q, %v", config, err) } - if err := l.setDefaultMethodLogger(&methodLoggerConfig{hdr: hdr, msg: msg}); err != nil { + if err := l.setDefaultMethodLogger(&MethodLoggerConfig{Header: hdr, Message: msg}); err != nil { return fmt.Errorf("invalid config: %v", err) } return nil @@ -104,11 +104,11 @@ func (l *logger) fillMethodLoggerWithConfigString(config string) error { return fmt.Errorf("invalid header/message length config: %q, %v", suffix, err) } if m == "*" { - if err := l.setServiceMethodLogger(s, &methodLoggerConfig{hdr: hdr, msg: msg}); err != nil { + if err := l.setServiceMethodLogger(s, &MethodLoggerConfig{Header: hdr, Message: msg}); err != nil { return fmt.Errorf("invalid config: %v", err) } } else { - if err := l.setMethodMethodLogger(s+"/"+m, &methodLoggerConfig{hdr: hdr, msg: msg}); err != nil { + if err := l.setMethodMethodLogger(s+"/"+m, &MethodLoggerConfig{Header: hdr, Message: msg}); err != nil { return fmt.Errorf("invalid config: %v", err) } } diff --git a/vendor/google.golang.org/grpc/internal/binarylog/method_logger.go b/vendor/google.golang.org/grpc/internal/binarylog/method_logger.go index 0cdb41831..24df0a1a0 100644 --- a/vendor/google.golang.org/grpc/internal/binarylog/method_logger.go +++ b/vendor/google.golang.org/grpc/internal/binarylog/method_logger.go @@ -48,7 +48,11 @@ func (g *callIDGenerator) reset() { var idGen callIDGenerator // MethodLogger is the sub-logger for each method. -type MethodLogger struct { +type MethodLogger interface { + Log(LogEntryConfig) +} + +type methodLogger struct { headerMaxLen, messageMaxLen uint64 callID uint64 @@ -57,8 +61,8 @@ type MethodLogger struct { sink Sink // TODO(blog): make this plugable. } -func newMethodLogger(h, m uint64) *MethodLogger { - return &MethodLogger{ +func newMethodLogger(h, m uint64) *methodLogger { + return &methodLogger{ headerMaxLen: h, messageMaxLen: m, @@ -69,8 +73,10 @@ func newMethodLogger(h, m uint64) *MethodLogger { } } -// Log creates a proto binary log entry, and logs it to the sink. -func (ml *MethodLogger) Log(c LogEntryConfig) { +// Build is an internal only method for building the proto message out of the +// input event. 
It's made public to enable other library to reuse as much logic +// in methodLogger as possible. +func (ml *methodLogger) Build(c LogEntryConfig) *pb.GrpcLogEntry { m := c.toProto() timestamp, _ := ptypes.TimestampProto(time.Now()) m.Timestamp = timestamp @@ -85,11 +91,15 @@ func (ml *MethodLogger) Log(c LogEntryConfig) { case *pb.GrpcLogEntry_Message: m.PayloadTruncated = ml.truncateMessage(pay.Message) } + return m +} - ml.sink.Write(m) +// Log creates a proto binary log entry, and logs it to the sink. +func (ml *methodLogger) Log(c LogEntryConfig) { + ml.sink.Write(ml.Build(c)) } -func (ml *MethodLogger) truncateMetadata(mdPb *pb.Metadata) (truncated bool) { +func (ml *methodLogger) truncateMetadata(mdPb *pb.Metadata) (truncated bool) { if ml.headerMaxLen == maxUInt { return false } @@ -119,7 +129,7 @@ func (ml *MethodLogger) truncateMetadata(mdPb *pb.Metadata) (truncated bool) { return truncated } -func (ml *MethodLogger) truncateMessage(msgPb *pb.Message) (truncated bool) { +func (ml *methodLogger) truncateMessage(msgPb *pb.Message) (truncated bool) { if ml.messageMaxLen == maxUInt { return false } diff --git a/vendor/google.golang.org/grpc/internal/channelz/funcs.go b/vendor/google.golang.org/grpc/internal/channelz/funcs.go index 6d5760d95..777cbcd79 100644 --- a/vendor/google.golang.org/grpc/internal/channelz/funcs.go +++ b/vendor/google.golang.org/grpc/internal/channelz/funcs.go @@ -24,6 +24,8 @@ package channelz import ( + "context" + "errors" "fmt" "sort" "sync" @@ -49,7 +51,8 @@ var ( // TurnOn turns on channelz data collection. func TurnOn() { if !IsOn() { - NewChannelzStorage() + db.set(newChannelMap()) + idGen.reset() atomic.StoreInt32(&curState, 1) } } @@ -94,46 +97,40 @@ func (d *dbWrapper) get() *channelMap { return d.DB } -// NewChannelzStorage initializes channelz data storage and id generator. +// NewChannelzStorageForTesting initializes channelz data storage and id +// generator for testing purposes. // -// This function returns a cleanup function to wait for all channelz state to be reset by the -// grpc goroutines when those entities get closed. By using this cleanup function, we make sure tests -// don't mess up each other, i.e. lingering goroutine from previous test doing entity removal happen -// to remove some entity just register by the new test, since the id space is the same. -// -// Note: This function is exported for testing purpose only. User should not call -// it in most cases. -func NewChannelzStorage() (cleanup func() error) { - db.set(&channelMap{ - topLevelChannels: make(map[int64]struct{}), - channels: make(map[int64]*channel), - listenSockets: make(map[int64]*listenSocket), - normalSockets: make(map[int64]*normalSocket), - servers: make(map[int64]*server), - subChannels: make(map[int64]*subChannel), - }) +// Returns a cleanup function to be invoked by the test, which waits for up to +// 10s for all channelz state to be reset by the grpc goroutines when those +// entities get closed. This cleanup function helps with ensuring that tests +// don't mess up each other. +func NewChannelzStorageForTesting() (cleanup func() error) { + db.set(newChannelMap()) idGen.reset() + return func() error { - var err error cm := db.get() if cm == nil { return nil } - for i := 0; i < 1000; i++ { - cm.mu.Lock() - if len(cm.topLevelChannels) == 0 && len(cm.servers) == 0 && len(cm.channels) == 0 && len(cm.subChannels) == 0 && len(cm.listenSockets) == 0 && len(cm.normalSockets) == 0 { - cm.mu.Unlock() - // all things stored in the channelz map have been cleared. 
+ + ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second) + defer cancel() + ticker := time.NewTicker(10 * time.Millisecond) + defer ticker.Stop() + for { + cm.mu.RLock() + topLevelChannels, servers, channels, subChannels, listenSockets, normalSockets := len(cm.topLevelChannels), len(cm.servers), len(cm.channels), len(cm.subChannels), len(cm.listenSockets), len(cm.normalSockets) + cm.mu.RUnlock() + + if err := ctx.Err(); err != nil { + return fmt.Errorf("after 10s the channelz map has not been cleaned up yet, topchannels: %d, servers: %d, channels: %d, subchannels: %d, listen sockets: %d, normal sockets: %d", topLevelChannels, servers, channels, subChannels, listenSockets, normalSockets) + } + if topLevelChannels == 0 && servers == 0 && channels == 0 && subChannels == 0 && listenSockets == 0 && normalSockets == 0 { return nil } - cm.mu.Unlock() - time.Sleep(10 * time.Millisecond) + <-ticker.C } - - cm.mu.Lock() - err = fmt.Errorf("after 10s the channelz map has not been cleaned up yet, topchannels: %d, servers: %d, channels: %d, subchannels: %d, listen sockets: %d, normal sockets: %d", len(cm.topLevelChannels), len(cm.servers), len(cm.channels), len(cm.subChannels), len(cm.listenSockets), len(cm.normalSockets)) - cm.mu.Unlock() - return err } } @@ -188,54 +185,77 @@ func GetServer(id int64) *ServerMetric { return db.get().GetServer(id) } -// RegisterChannel registers the given channel c in channelz database with ref -// as its reference name, and add it to the child list of its parent (identified -// by pid). pid = 0 means no parent. It returns the unique channelz tracking id -// assigned to this channel. -func RegisterChannel(c Channel, pid int64, ref string) int64 { +// RegisterChannel registers the given channel c in the channelz database with +// ref as its reference name, and adds it to the child list of its parent +// (identified by pid). pid == nil means no parent. +// +// Returns a unique channelz identifier assigned to this channel. +// +// If channelz is not turned ON, the channelz database is not mutated. +func RegisterChannel(c Channel, pid *Identifier, ref string) *Identifier { id := idGen.genID() + var parent int64 + isTopChannel := true + if pid != nil { + isTopChannel = false + parent = pid.Int() + } + + if !IsOn() { + return newIdentifer(RefChannel, id, pid) + } + cn := &channel{ refName: ref, c: c, subChans: make(map[int64]string), nestedChans: make(map[int64]string), id: id, - pid: pid, + pid: parent, trace: &channelTrace{createdTime: time.Now(), events: make([]*TraceEvent, 0, getMaxTraceEntry())}, } - if pid == 0 { - db.get().addChannel(id, cn, true, pid, ref) - } else { - db.get().addChannel(id, cn, false, pid, ref) - } - return id + db.get().addChannel(id, cn, isTopChannel, parent) + return newIdentifer(RefChannel, id, pid) } -// RegisterSubChannel registers the given channel c in channelz database with ref -// as its reference name, and add it to the child list of its parent (identified -// by pid). It returns the unique channelz tracking id assigned to this subchannel. -func RegisterSubChannel(c Channel, pid int64, ref string) int64 { - if pid == 0 { - logger.Error("a SubChannel's parent id cannot be 0") - return 0 +// RegisterSubChannel registers the given subChannel c in the channelz database +// with ref as its reference name, and adds it to the child list of its parent +// (identified by pid). +// +// Returns a unique channelz identifier assigned to this subChannel. +// +// If channelz is not turned ON, the channelz database is not mutated. 
+func RegisterSubChannel(c Channel, pid *Identifier, ref string) (*Identifier, error) { + if pid == nil { + return nil, errors.New("a SubChannel's parent id cannot be nil") } id := idGen.genID() + if !IsOn() { + return newIdentifer(RefSubChannel, id, pid), nil + } + sc := &subChannel{ refName: ref, c: c, sockets: make(map[int64]string), id: id, - pid: pid, + pid: pid.Int(), trace: &channelTrace{createdTime: time.Now(), events: make([]*TraceEvent, 0, getMaxTraceEntry())}, } - db.get().addSubChannel(id, sc, pid, ref) - return id + db.get().addSubChannel(id, sc, pid.Int()) + return newIdentifer(RefSubChannel, id, pid), nil } // RegisterServer registers the given server s in channelz database. It returns // the unique channelz tracking id assigned to this server. -func RegisterServer(s Server, ref string) int64 { +// +// If channelz is not turned ON, the channelz database is not mutated. +func RegisterServer(s Server, ref string) *Identifier { id := idGen.genID() + if !IsOn() { + return newIdentifer(RefServer, id, nil) + } + svr := &server{ refName: ref, s: s, @@ -244,71 +264,92 @@ func RegisterServer(s Server, ref string) int64 { id: id, } db.get().addServer(id, svr) - return id + return newIdentifer(RefServer, id, nil) } // RegisterListenSocket registers the given listen socket s in channelz database // with ref as its reference name, and add it to the child list of its parent // (identified by pid). It returns the unique channelz tracking id assigned to // this listen socket. -func RegisterListenSocket(s Socket, pid int64, ref string) int64 { - if pid == 0 { - logger.Error("a ListenSocket's parent id cannot be 0") - return 0 +// +// If channelz is not turned ON, the channelz database is not mutated. +func RegisterListenSocket(s Socket, pid *Identifier, ref string) (*Identifier, error) { + if pid == nil { + return nil, errors.New("a ListenSocket's parent id cannot be 0") } id := idGen.genID() - ls := &listenSocket{refName: ref, s: s, id: id, pid: pid} - db.get().addListenSocket(id, ls, pid, ref) - return id + if !IsOn() { + return newIdentifer(RefListenSocket, id, pid), nil + } + + ls := &listenSocket{refName: ref, s: s, id: id, pid: pid.Int()} + db.get().addListenSocket(id, ls, pid.Int()) + return newIdentifer(RefListenSocket, id, pid), nil } // RegisterNormalSocket registers the given normal socket s in channelz database -// with ref as its reference name, and add it to the child list of its parent +// with ref as its reference name, and adds it to the child list of its parent // (identified by pid). It returns the unique channelz tracking id assigned to // this normal socket. -func RegisterNormalSocket(s Socket, pid int64, ref string) int64 { - if pid == 0 { - logger.Error("a NormalSocket's parent id cannot be 0") - return 0 +// +// If channelz is not turned ON, the channelz database is not mutated. 
+func RegisterNormalSocket(s Socket, pid *Identifier, ref string) (*Identifier, error) { + if pid == nil { + return nil, errors.New("a NormalSocket's parent id cannot be 0") } id := idGen.genID() - ns := &normalSocket{refName: ref, s: s, id: id, pid: pid} - db.get().addNormalSocket(id, ns, pid, ref) - return id + if !IsOn() { + return newIdentifer(RefNormalSocket, id, pid), nil + } + + ns := &normalSocket{refName: ref, s: s, id: id, pid: pid.Int()} + db.get().addNormalSocket(id, ns, pid.Int()) + return newIdentifer(RefNormalSocket, id, pid), nil } -// RemoveEntry removes an entry with unique channelz trakcing id to be id from +// RemoveEntry removes an entry with unique channelz tracking id to be id from // channelz database. -func RemoveEntry(id int64) { - db.get().removeEntry(id) +// +// If channelz is not turned ON, this function is a no-op. +func RemoveEntry(id *Identifier) { + if !IsOn() { + return + } + db.get().removeEntry(id.Int()) } -// TraceEventDesc is what the caller of AddTraceEvent should provide to describe the event to be added -// to the channel trace. -// The Parent field is optional. It is used for event that will be recorded in the entity's parent -// trace also. +// TraceEventDesc is what the caller of AddTraceEvent should provide to describe +// the event to be added to the channel trace. +// +// The Parent field is optional. It is used for an event that will be recorded +// in the entity's parent trace. type TraceEventDesc struct { Desc string Severity Severity Parent *TraceEventDesc } -// AddTraceEvent adds trace related to the entity with specified id, using the provided TraceEventDesc. -func AddTraceEvent(l grpclog.DepthLoggerV2, id int64, depth int, desc *TraceEventDesc) { - for d := desc; d != nil; d = d.Parent { - switch d.Severity { - case CtUnknown, CtInfo: - l.InfoDepth(depth+1, d.Desc) - case CtWarning: - l.WarningDepth(depth+1, d.Desc) - case CtError: - l.ErrorDepth(depth+1, d.Desc) - } +// AddTraceEvent adds trace related to the entity with specified id, using the +// provided TraceEventDesc. +// +// If channelz is not turned ON, this will simply log the event descriptions. +func AddTraceEvent(l grpclog.DepthLoggerV2, id *Identifier, depth int, desc *TraceEventDesc) { + // Log only the trace description associated with the bottom most entity. + switch desc.Severity { + case CtUnknown, CtInfo: + l.InfoDepth(depth+1, withParens(id)+desc.Desc) + case CtWarning: + l.WarningDepth(depth+1, withParens(id)+desc.Desc) + case CtError: + l.ErrorDepth(depth+1, withParens(id)+desc.Desc) } + if getMaxTraceEntry() == 0 { return } - db.get().traceEvent(id, desc) + if IsOn() { + db.get().traceEvent(id.Int(), desc) + } } // channelMap is the storage data structure for channelz. 
@@ -326,6 +367,17 @@ type channelMap struct { normalSockets map[int64]*normalSocket } +func newChannelMap() *channelMap { + return &channelMap{ + topLevelChannels: make(map[int64]struct{}), + channels: make(map[int64]*channel), + listenSockets: make(map[int64]*listenSocket), + normalSockets: make(map[int64]*normalSocket), + servers: make(map[int64]*server), + subChannels: make(map[int64]*subChannel), + } +} + func (c *channelMap) addServer(id int64, s *server) { c.mu.Lock() s.cm = c @@ -333,7 +385,7 @@ func (c *channelMap) addServer(id int64, s *server) { c.mu.Unlock() } -func (c *channelMap) addChannel(id int64, cn *channel, isTopChannel bool, pid int64, ref string) { +func (c *channelMap) addChannel(id int64, cn *channel, isTopChannel bool, pid int64) { c.mu.Lock() cn.cm = c cn.trace.cm = c @@ -346,7 +398,7 @@ func (c *channelMap) addChannel(id int64, cn *channel, isTopChannel bool, pid in c.mu.Unlock() } -func (c *channelMap) addSubChannel(id int64, sc *subChannel, pid int64, ref string) { +func (c *channelMap) addSubChannel(id int64, sc *subChannel, pid int64) { c.mu.Lock() sc.cm = c sc.trace.cm = c @@ -355,7 +407,7 @@ func (c *channelMap) addSubChannel(id int64, sc *subChannel, pid int64, ref stri c.mu.Unlock() } -func (c *channelMap) addListenSocket(id int64, ls *listenSocket, pid int64, ref string) { +func (c *channelMap) addListenSocket(id int64, ls *listenSocket, pid int64) { c.mu.Lock() ls.cm = c c.listenSockets[id] = ls @@ -363,7 +415,7 @@ func (c *channelMap) addListenSocket(id int64, ls *listenSocket, pid int64, ref c.mu.Unlock() } -func (c *channelMap) addNormalSocket(id int64, ns *normalSocket, pid int64, ref string) { +func (c *channelMap) addNormalSocket(id int64, ns *normalSocket, pid int64) { c.mu.Lock() ns.cm = c c.normalSockets[id] = ns diff --git a/vendor/google.golang.org/grpc/internal/channelz/id.go b/vendor/google.golang.org/grpc/internal/channelz/id.go new file mode 100644 index 000000000..c9a27acd3 --- /dev/null +++ b/vendor/google.golang.org/grpc/internal/channelz/id.go @@ -0,0 +1,75 @@ +/* + * + * Copyright 2022 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package channelz + +import "fmt" + +// Identifier is an opaque identifier which uniquely identifies an entity in the +// channelz database. +type Identifier struct { + typ RefChannelType + id int64 + str string + pid *Identifier +} + +// Type returns the entity type corresponding to id. +func (id *Identifier) Type() RefChannelType { + return id.typ +} + +// Int returns the integer identifier corresponding to id. +func (id *Identifier) Int() int64 { + return id.id +} + +// String returns a string representation of the entity corresponding to id. +// +// This includes some information about the parent as well. 
Examples: +// Top-level channel: [Channel #channel-number] +// Nested channel: [Channel #parent-channel-number Channel #channel-number] +// Sub channel: [Channel #parent-channel SubChannel #subchannel-number] +func (id *Identifier) String() string { + return id.str +} + +// Equal returns true if other is the same as id. +func (id *Identifier) Equal(other *Identifier) bool { + if (id != nil) != (other != nil) { + return false + } + if id == nil && other == nil { + return true + } + return id.typ == other.typ && id.id == other.id && id.pid == other.pid +} + +// NewIdentifierForTesting returns a new opaque identifier to be used only for +// testing purposes. +func NewIdentifierForTesting(typ RefChannelType, id int64, pid *Identifier) *Identifier { + return newIdentifer(typ, id, pid) +} + +func newIdentifer(typ RefChannelType, id int64, pid *Identifier) *Identifier { + str := fmt.Sprintf("%s #%d", typ, id) + if pid != nil { + str = fmt.Sprintf("%s %s", pid, str) + } + return &Identifier{typ: typ, id: id, str: str, pid: pid} +} diff --git a/vendor/google.golang.org/grpc/internal/channelz/logging.go b/vendor/google.golang.org/grpc/internal/channelz/logging.go index b0013f9c8..8e13a3d2c 100644 --- a/vendor/google.golang.org/grpc/internal/channelz/logging.go +++ b/vendor/google.golang.org/grpc/internal/channelz/logging.go @@ -26,77 +26,54 @@ import ( var logger = grpclog.Component("channelz") +func withParens(id *Identifier) string { + return "[" + id.String() + "] " +} + // Info logs and adds a trace event if channelz is on. -func Info(l grpclog.DepthLoggerV2, id int64, args ...interface{}) { - if IsOn() { - AddTraceEvent(l, id, 1, &TraceEventDesc{ - Desc: fmt.Sprint(args...), - Severity: CtInfo, - }) - } else { - l.InfoDepth(1, args...) - } +func Info(l grpclog.DepthLoggerV2, id *Identifier, args ...interface{}) { + AddTraceEvent(l, id, 1, &TraceEventDesc{ + Desc: fmt.Sprint(args...), + Severity: CtInfo, + }) } // Infof logs and adds a trace event if channelz is on. -func Infof(l grpclog.DepthLoggerV2, id int64, format string, args ...interface{}) { - msg := fmt.Sprintf(format, args...) - if IsOn() { - AddTraceEvent(l, id, 1, &TraceEventDesc{ - Desc: msg, - Severity: CtInfo, - }) - } else { - l.InfoDepth(1, msg) - } +func Infof(l grpclog.DepthLoggerV2, id *Identifier, format string, args ...interface{}) { + AddTraceEvent(l, id, 1, &TraceEventDesc{ + Desc: fmt.Sprintf(format, args...), + Severity: CtInfo, + }) } // Warning logs and adds a trace event if channelz is on. -func Warning(l grpclog.DepthLoggerV2, id int64, args ...interface{}) { - if IsOn() { - AddTraceEvent(l, id, 1, &TraceEventDesc{ - Desc: fmt.Sprint(args...), - Severity: CtWarning, - }) - } else { - l.WarningDepth(1, args...) - } +func Warning(l grpclog.DepthLoggerV2, id *Identifier, args ...interface{}) { + AddTraceEvent(l, id, 1, &TraceEventDesc{ + Desc: fmt.Sprint(args...), + Severity: CtWarning, + }) } // Warningf logs and adds a trace event if channelz is on. -func Warningf(l grpclog.DepthLoggerV2, id int64, format string, args ...interface{}) { - msg := fmt.Sprintf(format, args...) - if IsOn() { - AddTraceEvent(l, id, 1, &TraceEventDesc{ - Desc: msg, - Severity: CtWarning, - }) - } else { - l.WarningDepth(1, msg) - } +func Warningf(l grpclog.DepthLoggerV2, id *Identifier, format string, args ...interface{}) { + AddTraceEvent(l, id, 1, &TraceEventDesc{ + Desc: fmt.Sprintf(format, args...), + Severity: CtWarning, + }) } // Error logs and adds a trace event if channelz is on. 
-func Error(l grpclog.DepthLoggerV2, id int64, args ...interface{}) { - if IsOn() { - AddTraceEvent(l, id, 1, &TraceEventDesc{ - Desc: fmt.Sprint(args...), - Severity: CtError, - }) - } else { - l.ErrorDepth(1, args...) - } +func Error(l grpclog.DepthLoggerV2, id *Identifier, args ...interface{}) { + AddTraceEvent(l, id, 1, &TraceEventDesc{ + Desc: fmt.Sprint(args...), + Severity: CtError, + }) } // Errorf logs and adds a trace event if channelz is on. -func Errorf(l grpclog.DepthLoggerV2, id int64, format string, args ...interface{}) { - msg := fmt.Sprintf(format, args...) - if IsOn() { - AddTraceEvent(l, id, 1, &TraceEventDesc{ - Desc: msg, - Severity: CtError, - }) - } else { - l.ErrorDepth(1, msg) - } +func Errorf(l grpclog.DepthLoggerV2, id *Identifier, format string, args ...interface{}) { + AddTraceEvent(l, id, 1, &TraceEventDesc{ + Desc: fmt.Sprintf(format, args...), + Severity: CtError, + }) } diff --git a/vendor/google.golang.org/grpc/internal/channelz/types.go b/vendor/google.golang.org/grpc/internal/channelz/types.go index 3c595d154..ad0ce4dab 100644 --- a/vendor/google.golang.org/grpc/internal/channelz/types.go +++ b/vendor/google.golang.org/grpc/internal/channelz/types.go @@ -686,12 +686,33 @@ const ( type RefChannelType int const ( + // RefUnknown indicates an unknown entity type, the zero value for this type. + RefUnknown RefChannelType = iota // RefChannel indicates the referenced entity is a Channel. - RefChannel RefChannelType = iota + RefChannel // RefSubChannel indicates the referenced entity is a SubChannel. RefSubChannel + // RefServer indicates the referenced entity is a Server. + RefServer + // RefListenSocket indicates the referenced entity is a ListenSocket. + RefListenSocket + // RefNormalSocket indicates the referenced entity is a NormalSocket. + RefNormalSocket ) +var refChannelTypeToString = map[RefChannelType]string{ + RefUnknown: "Unknown", + RefChannel: "Channel", + RefSubChannel: "SubChannel", + RefServer: "Server", + RefListenSocket: "ListenSocket", + RefNormalSocket: "NormalSocket", +} + +func (r RefChannelType) String() string { + return refChannelTypeToString[r] +} + func (c *channelTrace) dumpData() *ChannelTrace { c.mu.Lock() ct := &ChannelTrace{EventNum: c.eventCount, CreationTime: c.createdTime} diff --git a/vendor/google.golang.org/grpc/internal/envconfig/envconfig.go b/vendor/google.golang.org/grpc/internal/envconfig/envconfig.go index e766ac04a..6f0272543 100644 --- a/vendor/google.golang.org/grpc/internal/envconfig/envconfig.go +++ b/vendor/google.golang.org/grpc/internal/envconfig/envconfig.go @@ -22,19 +22,14 @@ package envconfig import ( "os" "strings" - - xdsenv "google.golang.org/grpc/internal/xds/env" ) const ( prefix = "GRPC_GO_" - retryStr = prefix + "RETRY" txtErrIgnoreStr = prefix + "IGNORE_TXT_ERRORS" ) var ( - // Retry is set if retry is explicitly enabled via "GRPC_GO_RETRY=on" or if XDS retry support is enabled. - Retry = strings.EqualFold(os.Getenv(retryStr), "on") || xdsenv.RetrySupport // TXTErrIgnore is set if TXT errors should be ignored ("GRPC_GO_IGNORE_TXT_ERRORS" is not "false"). TXTErrIgnore = !strings.EqualFold(os.Getenv(txtErrIgnoreStr), "false") ) diff --git a/vendor/google.golang.org/grpc/internal/envconfig/xds.go b/vendor/google.golang.org/grpc/internal/envconfig/xds.go new file mode 100644 index 000000000..7d996e51b --- /dev/null +++ b/vendor/google.golang.org/grpc/internal/envconfig/xds.go @@ -0,0 +1,101 @@ +/* + * + * Copyright 2020 gRPC authors. 
+ * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package envconfig + +import ( + "os" + "strings" +) + +const ( + // XDSBootstrapFileNameEnv is the env variable to set bootstrap file name. + // Do not use this and read from env directly. Its value is read and kept in + // variable XDSBootstrapFileName. + // + // When both bootstrap FileName and FileContent are set, FileName is used. + XDSBootstrapFileNameEnv = "GRPC_XDS_BOOTSTRAP" + // XDSBootstrapFileContentEnv is the env variable to set bootstrap file + // content. Do not use this and read from env directly. Its value is read + // and kept in variable XDSBootstrapFileContent. + // + // When both bootstrap FileName and FileContent are set, FileName is used. + XDSBootstrapFileContentEnv = "GRPC_XDS_BOOTSTRAP_CONFIG" + + ringHashSupportEnv = "GRPC_XDS_EXPERIMENTAL_ENABLE_RING_HASH" + clientSideSecuritySupportEnv = "GRPC_XDS_EXPERIMENTAL_SECURITY_SUPPORT" + aggregateAndDNSSupportEnv = "GRPC_XDS_EXPERIMENTAL_ENABLE_AGGREGATE_AND_LOGICAL_DNS_CLUSTER" + rbacSupportEnv = "GRPC_XDS_EXPERIMENTAL_RBAC" + outlierDetectionSupportEnv = "GRPC_EXPERIMENTAL_ENABLE_OUTLIER_DETECTION" + federationEnv = "GRPC_EXPERIMENTAL_XDS_FEDERATION" + rlsInXDSEnv = "GRPC_EXPERIMENTAL_XDS_RLS_LB" + + c2pResolverTestOnlyTrafficDirectorURIEnv = "GRPC_TEST_ONLY_GOOGLE_C2P_RESOLVER_TRAFFIC_DIRECTOR_URI" +) + +var ( + // XDSBootstrapFileName holds the name of the file which contains xDS + // bootstrap configuration. Users can specify the location of the bootstrap + // file by setting the environment variable "GRPC_XDS_BOOTSTRAP". + // + // When both bootstrap FileName and FileContent are set, FileName is used. + XDSBootstrapFileName = os.Getenv(XDSBootstrapFileNameEnv) + // XDSBootstrapFileContent holds the content of the xDS bootstrap + // configuration. Users can specify the bootstrap config by setting the + // environment variable "GRPC_XDS_BOOTSTRAP_CONFIG". + // + // When both bootstrap FileName and FileContent are set, FileName is used. + XDSBootstrapFileContent = os.Getenv(XDSBootstrapFileContentEnv) + // XDSRingHash indicates whether ring hash support is enabled, which can be + // disabled by setting the environment variable + // "GRPC_XDS_EXPERIMENTAL_ENABLE_RING_HASH" to "false". + XDSRingHash = !strings.EqualFold(os.Getenv(ringHashSupportEnv), "false") + // XDSClientSideSecurity is used to control processing of security + // configuration on the client-side. + // + // Note that there is no env var protection for the server-side because we + // have a brand new API on the server-side and users explicitly need to use + // the new API to get security integration on the server. + XDSClientSideSecurity = !strings.EqualFold(os.Getenv(clientSideSecuritySupportEnv), "false") + // XDSAggregateAndDNS indicates whether processing of aggregated cluster + // and DNS cluster is enabled, which can be enabled by setting the + // environment variable + // "GRPC_XDS_EXPERIMENTAL_ENABLE_AGGREGATE_AND_LOGICAL_DNS_CLUSTER" to + // "true". 
+ XDSAggregateAndDNS = strings.EqualFold(os.Getenv(aggregateAndDNSSupportEnv), "true") + + // XDSRBAC indicates whether xDS configured RBAC HTTP Filter is enabled, + // which can be disabled by setting the environment variable + // "GRPC_XDS_EXPERIMENTAL_RBAC" to "false". + XDSRBAC = !strings.EqualFold(os.Getenv(rbacSupportEnv), "false") + // XDSOutlierDetection indicates whether outlier detection support is + // enabled, which can be enabled by setting the environment variable + // "GRPC_EXPERIMENTAL_ENABLE_OUTLIER_DETECTION" to "true". + XDSOutlierDetection = strings.EqualFold(os.Getenv(outlierDetectionSupportEnv), "true") + // XDSFederation indicates whether federation support is enabled. + XDSFederation = strings.EqualFold(os.Getenv(federationEnv), "true") + + // XDSRLS indicates whether processing of Cluster Specifier plugins and + // support for the RLS CLuster Specifier is enabled, which can be enabled by + // setting the environment variable "GRPC_EXPERIMENTAL_XDS_RLS_LB" to + // "true". + XDSRLS = strings.EqualFold(os.Getenv(rlsInXDSEnv), "true") + + // C2PResolverTestOnlyTrafficDirectorURI is the TD URI for testing. + C2PResolverTestOnlyTrafficDirectorURI = os.Getenv(c2pResolverTestOnlyTrafficDirectorURIEnv) +) diff --git a/vendor/google.golang.org/grpc/internal/grpclog/grpclog.go b/vendor/google.golang.org/grpc/internal/grpclog/grpclog.go index e6f975cbf..30a3b4258 100644 --- a/vendor/google.golang.org/grpc/internal/grpclog/grpclog.go +++ b/vendor/google.golang.org/grpc/internal/grpclog/grpclog.go @@ -115,12 +115,12 @@ type LoggerV2 interface { // Notice: This type is EXPERIMENTAL and may be changed or removed in a // later release. type DepthLoggerV2 interface { - // InfoDepth logs to INFO log at the specified depth. Arguments are handled in the manner of fmt.Print. + // InfoDepth logs to INFO log at the specified depth. Arguments are handled in the manner of fmt.Println. InfoDepth(depth int, args ...interface{}) - // WarningDepth logs to WARNING log at the specified depth. Arguments are handled in the manner of fmt.Print. + // WarningDepth logs to WARNING log at the specified depth. Arguments are handled in the manner of fmt.Println. WarningDepth(depth int, args ...interface{}) - // ErrorDetph logs to ERROR log at the specified depth. Arguments are handled in the manner of fmt.Print. + // ErrorDepth logs to ERROR log at the specified depth. Arguments are handled in the manner of fmt.Println. ErrorDepth(depth int, args ...interface{}) - // FatalDepth logs to FATAL log at the specified depth. Arguments are handled in the manner of fmt.Print. + // FatalDepth logs to FATAL log at the specified depth. Arguments are handled in the manner of fmt.Println. FatalDepth(depth int, args ...interface{}) } diff --git a/vendor/google.golang.org/grpc/internal/grpcutil/grpcutil.go b/vendor/google.golang.org/grpc/internal/grpcutil/grpcutil.go new file mode 100644 index 000000000..e2f948e8f --- /dev/null +++ b/vendor/google.golang.org/grpc/internal/grpcutil/grpcutil.go @@ -0,0 +1,20 @@ +/* + * + * Copyright 2021 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
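The xDS switches above follow two conventions: features that default to on are disabled only by an explicit `"false"` (ring hash, client-side security, RBAC), while experimental features default to off and require an explicit `"true"` (aggregate/DNS clusters, outlier detection, federation, RLS). A minimal sketch of the two patterns, using placeholder environment variable names rather than the real gRPC ones:

```go
package main

import (
	"fmt"
	"os"
	"strings"
)

// defaultOn is true unless the variable is explicitly set to "false".
func defaultOn(envName string) bool {
	return !strings.EqualFold(os.Getenv(envName), "false")
}

// defaultOff is true only when the variable is explicitly set to "true".
func defaultOff(envName string) bool {
	return strings.EqualFold(os.Getenv(envName), "true")
}

func main() {
	os.Setenv("EXAMPLE_FEATURE_A", "FALSE")      // EqualFold: comparison is case-insensitive
	fmt.Println(defaultOn("EXAMPLE_FEATURE_A"))  // false: explicitly disabled
	fmt.Println(defaultOn("EXAMPLE_FEATURE_B"))  // true: unset keeps the default
	fmt.Println(defaultOff("EXAMPLE_FEATURE_B")) // false: the opt-in was never given
}
```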
+ * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package grpcutil provides utility functions used across the gRPC codebase. +package grpcutil diff --git a/vendor/google.golang.org/grpc/internal/grpcutil/regex.go b/vendor/google.golang.org/grpc/internal/grpcutil/regex.go new file mode 100644 index 000000000..7a092b2b8 --- /dev/null +++ b/vendor/google.golang.org/grpc/internal/grpcutil/regex.go @@ -0,0 +1,31 @@ +/* + * + * Copyright 2021 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package grpcutil + +import "regexp" + +// FullMatchWithRegex returns whether the full text matches the regex provided. +func FullMatchWithRegex(re *regexp.Regexp, text string) bool { + if len(text) == 0 { + return re.MatchString(text) + } + re.Longest() + rem := re.FindString(text) + return len(rem) == len(text) +} diff --git a/vendor/google.golang.org/grpc/internal/grpcutil/target.go b/vendor/google.golang.org/grpc/internal/grpcutil/target.go deleted file mode 100644 index 8833021da..000000000 --- a/vendor/google.golang.org/grpc/internal/grpcutil/target.go +++ /dev/null @@ -1,89 +0,0 @@ -/* - * - * Copyright 2020 gRPC authors. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -// Package grpcutil provides a bunch of utility functions to be used across the -// gRPC codebase. -package grpcutil - -import ( - "strings" - - "google.golang.org/grpc/resolver" -) - -// split2 returns the values from strings.SplitN(s, sep, 2). -// If sep is not found, it returns ("", "", false) instead. -func split2(s, sep string) (string, string, bool) { - spl := strings.SplitN(s, sep, 2) - if len(spl) < 2 { - return "", "", false - } - return spl[0], spl[1], true -} - -// ParseTarget splits target into a resolver.Target struct containing scheme, -// authority and endpoint. skipUnixColonParsing indicates that the parse should -// not parse "unix:[path]" cases. This should be true in cases where a custom -// dialer is present, to prevent a behavior change. -// -// If target is not a valid scheme://authority/endpoint as specified in -// https://github.com/grpc/grpc/blob/master/doc/naming.md, -// it returns {Endpoint: target}. 
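`FullMatchWithRegex` above accepts a match only when the leftmost-longest match covers the entire input; calling `Longest()` switches Go's regexp engine to leftmost-longest semantics so, for example, alternations prefer the longer branch. A rough standalone re-statement of that check (the helper is copied locally for illustration, since the real one lives in an internal package):

```go
package main

import (
	"fmt"
	"regexp"
)

// fullMatch reports whether re matches all of text, not just a substring.
func fullMatch(re *regexp.Regexp, text string) bool {
	if len(text) == 0 {
		return re.MatchString(text)
	}
	re.Longest() // prefer the leftmost-longest match
	return len(re.FindString(text)) == len(text)
}

func main() {
	re := regexp.MustCompile("a+b?")
	fmt.Println(fullMatch(re, "aaab"))  // true: the whole input is matched
	fmt.Println(fullMatch(re, "aaabc")) // false: the trailing "c" is not covered
	fmt.Println(fullMatch(re, "xaab"))  // false: the match does not start at offset 0
}
```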
-func ParseTarget(target string, skipUnixColonParsing bool) (ret resolver.Target) { - var ok bool - if strings.HasPrefix(target, "unix-abstract:") { - if strings.HasPrefix(target, "unix-abstract://") { - // Maybe, with Authority specified, try to parse it - var remain string - ret.Scheme, remain, _ = split2(target, "://") - ret.Authority, ret.Endpoint, ok = split2(remain, "/") - if !ok { - // No Authority, add the "//" back - ret.Endpoint = "//" + remain - } else { - // Found Authority, add the "/" back - ret.Endpoint = "/" + ret.Endpoint - } - } else { - // Without Authority specified, split target on ":" - ret.Scheme, ret.Endpoint, _ = split2(target, ":") - } - return ret - } - ret.Scheme, ret.Endpoint, ok = split2(target, "://") - if !ok { - if strings.HasPrefix(target, "unix:") && !skipUnixColonParsing { - // Handle the "unix:[local/path]" and "unix:[/absolute/path]" cases, - // because splitting on :// only handles the - // "unix://[/absolute/path]" case. Only handle if the dialer is nil, - // to avoid a behavior change with custom dialers. - return resolver.Target{Scheme: "unix", Endpoint: target[len("unix:"):]} - } - return resolver.Target{Endpoint: target} - } - ret.Authority, ret.Endpoint, ok = split2(ret.Endpoint, "/") - if !ok { - return resolver.Target{Endpoint: target} - } - if ret.Scheme == "unix" { - // Add the "/" back in the unix case, so the unix resolver receives the - // actual endpoint in the "unix://[/absolute/path]" case. - ret.Endpoint = "/" + ret.Endpoint - } - return ret -} diff --git a/vendor/google.golang.org/grpc/internal/internal.go b/vendor/google.golang.org/grpc/internal/internal.go index 1b596bf35..6d355b0b0 100644 --- a/vendor/google.golang.org/grpc/internal/internal.go +++ b/vendor/google.golang.org/grpc/internal/internal.go @@ -38,11 +38,10 @@ var ( // KeepaliveMinPingTime is the minimum ping interval. This must be 10s by // default, but tests may wish to set it lower for convenience. KeepaliveMinPingTime = 10 * time.Second - // ParseServiceConfigForTesting is for creating a fake - // ClientConn for resolver testing only - ParseServiceConfigForTesting interface{} // func(string) *serviceconfig.ParseResult + // ParseServiceConfig parses a JSON representation of the service config. + ParseServiceConfig interface{} // func(string) *serviceconfig.ParseResult // EqualServiceConfigForTesting is for testing service config generation and - // parsing. Both a and b should be returned by ParseServiceConfigForTesting. + // parsing. Both a and b should be returned by ParseServiceConfig. // This function compares the config without rawJSON stripped, in case the // there's difference in white space. EqualServiceConfigForTesting func(a, b serviceconfig.Config) bool @@ -86,3 +85,9 @@ const ( // that supports backend returned by grpclb balancer. CredsBundleModeBackendFromBalancer = "backend-from-balancer" ) + +// RLSLoadBalancingPolicyName is the name of the RLS LB policy. +// +// It currently has an experimental suffix which would be removed once +// end-to-end testing of the policy is completed. 
+const RLSLoadBalancingPolicyName = "rls_experimental" diff --git a/vendor/google.golang.org/grpc/internal/metadata/metadata.go b/vendor/google.golang.org/grpc/internal/metadata/metadata.go index 302262613..b2980f8ac 100644 --- a/vendor/google.golang.org/grpc/internal/metadata/metadata.go +++ b/vendor/google.golang.org/grpc/internal/metadata/metadata.go @@ -22,6 +22,9 @@ package metadata import ( + "fmt" + "strings" + "google.golang.org/grpc/metadata" "google.golang.org/grpc/resolver" ) @@ -30,14 +33,38 @@ type mdKeyType string const mdKey = mdKeyType("grpc.internal.address.metadata") +type mdValue metadata.MD + +func (m mdValue) Equal(o interface{}) bool { + om, ok := o.(mdValue) + if !ok { + return false + } + if len(m) != len(om) { + return false + } + for k, v := range m { + ov := om[k] + if len(ov) != len(v) { + return false + } + for i, ve := range v { + if ov[i] != ve { + return false + } + } + } + return true +} + // Get returns the metadata of addr. func Get(addr resolver.Address) metadata.MD { attrs := addr.Attributes if attrs == nil { return nil } - md, _ := attrs.Value(mdKey).(metadata.MD) - return md + md, _ := attrs.Value(mdKey).(mdValue) + return metadata.MD(md) } // Set sets (overrides) the metadata in addr. @@ -45,6 +72,49 @@ func Get(addr resolver.Address) metadata.MD { // When a SubConn is created with this address, the RPCs sent on it will all // have this metadata. func Set(addr resolver.Address, md metadata.MD) resolver.Address { - addr.Attributes = addr.Attributes.WithValues(mdKey, md) + addr.Attributes = addr.Attributes.WithValue(mdKey, mdValue(md)) return addr } + +// Validate returns an error if the input md contains invalid keys or values. +// +// If the header is not a pseudo-header, the following items are checked: +// - header names must contain one or more characters from this set [0-9 a-z _ - .]. +// - if the header-name ends with a "-bin" suffix, no validation of the header value is performed. +// - otherwise, the header value must contain one or more characters from the set [%x20-%x7E]. +func Validate(md metadata.MD) error { + for k, vals := range md { + // pseudo-header will be ignored + if k[0] == ':' { + continue + } + // check key, for i that saving a conversion if not using for range + for i := 0; i < len(k); i++ { + r := k[i] + if !(r >= 'a' && r <= 'z') && !(r >= '0' && r <= '9') && r != '.' && r != '-' && r != '_' { + return fmt.Errorf("header key %q contains illegal characters not in [0-9a-z-_.]", k) + } + } + if strings.HasSuffix(k, "-bin") { + continue + } + // check value + for _, val := range vals { + if hasNotPrintable(val) { + return fmt.Errorf("header key %q contains value with non-printable ASCII characters", k) + } + } + } + return nil +} + +// hasNotPrintable return true if msg contains any characters which are not in %x20-%x7E +func hasNotPrintable(msg string) bool { + // for i that saving a conversion if not using for range + for i := 0; i < len(msg); i++ { + if msg[i] < 0x20 || msg[i] > 0x7E { + return true + } + } + return false +} diff --git a/vendor/google.golang.org/grpc/internal/pretty/pretty.go b/vendor/google.golang.org/grpc/internal/pretty/pretty.go new file mode 100644 index 000000000..0177af4b5 --- /dev/null +++ b/vendor/google.golang.org/grpc/internal/pretty/pretty.go @@ -0,0 +1,82 @@ +/* + * + * Copyright 2021 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
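The `Validate` helper added above encodes the metadata rules stated in its doc comment: pseudo-headers are skipped, key names must consist of `[0-9a-z-_.]`, `-bin` keys may carry arbitrary bytes, and all other values must be printable ASCII (0x20–0x7E). The sketch below applies the same rules to a plain `map[string][]string` without depending on the internal package; it is an illustration of the rules, not the gRPC implementation.

```go
package main

import (
	"fmt"
	"strings"
)

func validKey(k string) bool {
	for i := 0; i < len(k); i++ {
		c := k[i]
		if !(c >= 'a' && c <= 'z') && !(c >= '0' && c <= '9') && c != '.' && c != '-' && c != '_' {
			return false
		}
	}
	return true
}

func printableASCII(s string) bool {
	for i := 0; i < len(s); i++ {
		if s[i] < 0x20 || s[i] > 0x7E {
			return false
		}
	}
	return true
}

func validate(md map[string][]string) error {
	for k, vals := range md {
		if strings.HasPrefix(k, ":") { // pseudo-headers are not checked
			continue
		}
		if !validKey(k) {
			return fmt.Errorf("key %q has characters outside [0-9a-z-_.]", k)
		}
		if strings.HasSuffix(k, "-bin") { // binary headers: values are not checked
			continue
		}
		for _, v := range vals {
			if !printableASCII(v) {
				return fmt.Errorf("key %q has a non-printable ASCII value", k)
			}
		}
	}
	return nil
}

func main() {
	fmt.Println(validate(map[string][]string{"x-trace-id": {"abc123"}}))    // <nil>
	fmt.Println(validate(map[string][]string{"Bad-Key": {"v"}}))            // error: uppercase key
	fmt.Println(validate(map[string][]string{"payload-bin": {"\x00\x01"}})) // <nil>: -bin values are exempt
}
```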
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package pretty defines helper functions to pretty-print structs for logging. +package pretty + +import ( + "bytes" + "encoding/json" + "fmt" + + "github.com/golang/protobuf/jsonpb" + protov1 "github.com/golang/protobuf/proto" + "google.golang.org/protobuf/encoding/protojson" + protov2 "google.golang.org/protobuf/proto" +) + +const jsonIndent = " " + +// ToJSON marshals the input into a json string. +// +// If marshal fails, it falls back to fmt.Sprintf("%+v"). +func ToJSON(e interface{}) string { + switch ee := e.(type) { + case protov1.Message: + mm := jsonpb.Marshaler{Indent: jsonIndent} + ret, err := mm.MarshalToString(ee) + if err != nil { + // This may fail for proto.Anys, e.g. for xDS v2, LDS, the v2 + // messages are not imported, and this will fail because the message + // is not found. + return fmt.Sprintf("%+v", ee) + } + return ret + case protov2.Message: + mm := protojson.MarshalOptions{ + Multiline: true, + Indent: jsonIndent, + } + ret, err := mm.Marshal(ee) + if err != nil { + // This may fail for proto.Anys, e.g. for xDS v2, LDS, the v2 + // messages are not imported, and this will fail because the message + // is not found. + return fmt.Sprintf("%+v", ee) + } + return string(ret) + default: + ret, err := json.MarshalIndent(ee, "", jsonIndent) + if err != nil { + return fmt.Sprintf("%+v", ee) + } + return string(ret) + } +} + +// FormatJSON formats the input json bytes with indentation. +// +// If Indent fails, it returns the unchanged input as string. +func FormatJSON(b []byte) string { + var out bytes.Buffer + err := json.Indent(&out, b, "", jsonIndent) + if err != nil { + return string(b) + } + return out.String() +} diff --git a/vendor/google.golang.org/grpc/internal/resolver/config_selector.go b/vendor/google.golang.org/grpc/internal/resolver/config_selector.go index be7e13d58..c7a18a948 100644 --- a/vendor/google.golang.org/grpc/internal/resolver/config_selector.go +++ b/vendor/google.golang.org/grpc/internal/resolver/config_selector.go @@ -132,7 +132,7 @@ const csKey = csKeyType("grpc.internal.resolver.configSelector") // SetConfigSelector sets the config selector in state and returns the new // state. func SetConfigSelector(state resolver.State, cs ConfigSelector) resolver.State { - state.Attributes = state.Attributes.WithValues(csKey, cs) + state.Attributes = state.Attributes.WithValue(csKey, cs) return state } diff --git a/vendor/google.golang.org/grpc/internal/resolver/unix/unix.go b/vendor/google.golang.org/grpc/internal/resolver/unix/unix.go index 0d5a811dd..20852e59d 100644 --- a/vendor/google.golang.org/grpc/internal/resolver/unix/unix.go +++ b/vendor/google.golang.org/grpc/internal/resolver/unix/unix.go @@ -37,7 +37,17 @@ func (b *builder) Build(target resolver.Target, cc resolver.ClientConn, _ resolv if target.Authority != "" { return nil, fmt.Errorf("invalid (non-empty) authority: %v", target.Authority) } - addr := resolver.Address{Addr: target.Endpoint} + + // gRPC was parsing the dial target manually before PR #4817, and we + // switched to using url.Parse() in that PR. 
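`FormatJSON` above simply re-indents already-encoded JSON and falls back to returning the input unchanged when it is not valid JSON. A minimal sketch of that fallback pattern using only the standard library (the function name here is made up for illustration):

```go
package main

import (
	"bytes"
	"encoding/json"
	"fmt"
)

// indentOrPassThrough pretty-prints b, or returns it unchanged if it is not valid JSON.
func indentOrPassThrough(b []byte) string {
	var out bytes.Buffer
	if err := json.Indent(&out, b, "", "  "); err != nil {
		return string(b)
	}
	return out.String()
}

func main() {
	fmt.Println(indentOrPassThrough([]byte(`{"service":"foo","retry":true}`))) // indented JSON
	fmt.Println(indentOrPassThrough([]byte("not json")))                       // printed as-is
}
```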
To avoid breaking existing + // resolver implementations we ended up stripping the leading "/" from the + // endpoint. This obviously does not work for the "unix" scheme. Hence we + // end up using the parsed URL instead. + endpoint := target.URL.Path + if endpoint == "" { + endpoint = target.URL.Opaque + } + addr := resolver.Address{Addr: endpoint} if b.scheme == unixAbstractScheme { // prepend "\x00" to address for unix-abstract addr.Addr = "\x00" + addr.Addr diff --git a/vendor/google.golang.org/grpc/internal/transport/controlbuf.go b/vendor/google.golang.org/grpc/internal/transport/controlbuf.go index 45532f8ae..244f4b081 100644 --- a/vendor/google.golang.org/grpc/internal/transport/controlbuf.go +++ b/vendor/google.golang.org/grpc/internal/transport/controlbuf.go @@ -133,9 +133,11 @@ type cleanupStream struct { func (c *cleanupStream) isTransportResponseFrame() bool { return c.rst } // Results in a RST_STREAM type earlyAbortStream struct { + httpStatus uint32 streamID uint32 contentSubtype string status *status.Status + rst bool } func (*earlyAbortStream) isTransportResponseFrame() bool { return false } @@ -771,9 +773,12 @@ func (l *loopyWriter) earlyAbortStreamHandler(eas *earlyAbortStream) error { if l.side == clientSide { return errors.New("earlyAbortStream not handled on client") } - + // In case the caller forgets to set the http status, default to 200. + if eas.httpStatus == 0 { + eas.httpStatus = 200 + } headerFields := []hpack.HeaderField{ - {Name: ":status", Value: "200"}, + {Name: ":status", Value: strconv.Itoa(int(eas.httpStatus))}, {Name: "content-type", Value: grpcutil.ContentType(eas.contentSubtype)}, {Name: "grpc-status", Value: strconv.Itoa(int(eas.status.Code()))}, {Name: "grpc-message", Value: encodeGrpcMessage(eas.status.Message())}, @@ -782,6 +787,11 @@ func (l *loopyWriter) earlyAbortStreamHandler(eas *earlyAbortStream) error { if err := l.writeHeader(eas.streamID, true, headerFields, nil); err != nil { return err } + if eas.rst { + if err := l.framer.fr.WriteRSTStream(eas.streamID, http2.ErrCodeNo); err != nil { + return err + } + } return nil } diff --git a/vendor/google.golang.org/grpc/internal/transport/flowcontrol.go b/vendor/google.golang.org/grpc/internal/transport/flowcontrol.go index f262edd8e..97198c515 100644 --- a/vendor/google.golang.org/grpc/internal/transport/flowcontrol.go +++ b/vendor/google.golang.org/grpc/internal/transport/flowcontrol.go @@ -136,12 +136,10 @@ type inFlow struct { // newLimit updates the inflow window to a new value n. // It assumes that n is always greater than the old limit. -func (f *inFlow) newLimit(n uint32) uint32 { +func (f *inFlow) newLimit(n uint32) { f.mu.Lock() - d := n - f.limit f.limit = n f.mu.Unlock() - return d } func (f *inFlow) maybeAdjust(n uint32) uint32 { diff --git a/vendor/google.golang.org/grpc/internal/transport/http2_client.go b/vendor/google.golang.org/grpc/internal/transport/http2_client.go index 755863074..24ca59084 100644 --- a/vendor/google.golang.org/grpc/internal/transport/http2_client.go +++ b/vendor/google.golang.org/grpc/internal/transport/http2_client.go @@ -25,6 +25,7 @@ import ( "math" "net" "net/http" + "path/filepath" "strconv" "strings" "sync" @@ -131,7 +132,7 @@ type http2Client struct { kpDormant bool // Fields below are for channelz metric collection. 
- channelzID int64 // channelz unique identification number + channelzID *channelz.Identifier czData *channelzData onGoAway func(GoAwayReason) @@ -146,13 +147,20 @@ func dial(ctx context.Context, fn func(context.Context, string) (net.Conn, error address := addr.Addr networkType, ok := networktype.Get(addr) if fn != nil { + // Special handling for unix scheme with custom dialer. Back in the day, + // we did not have a unix resolver and therefore targets with a unix + // scheme would end up using the passthrough resolver. So, user's used a + // custom dialer in this case and expected the original dial target to + // be passed to the custom dialer. Now, we have a unix resolver. But if + // a custom dialer is specified, we want to retain the old behavior in + // terms of the address being passed to the custom dialer. if networkType == "unix" && !strings.HasPrefix(address, "\x00") { - // For backward compatibility, if the user dialed "unix:///path", - // the passthrough resolver would be used and the user's custom - // dialer would see "unix:///path". Since the unix resolver is used - // and the address is now "/path", prepend "unix://" so the user's - // custom dialer sees the same address. - return fn(ctx, "unix://"+address) + // Supported unix targets are either "unix://absolute-path" or + // "unix:relative-path". + if filepath.IsAbs(address) { + return fn(ctx, "unix://"+address) + } + return fn(ctx, "unix:"+address) } return fn(ctx, address) } @@ -193,6 +201,12 @@ func newHTTP2Client(connectCtx, ctx context.Context, addr resolver.Address, opts } }() + // gRPC, resolver, balancer etc. can specify arbitrary data in the + // Attributes field of resolver.Address, which is shoved into connectCtx + // and passed to the dialer and credential handshaker. This makes it possible for + // address specific arbitrary data to reach custom dialers and credential handshakers. + connectCtx = icredentials.NewClientHandshakeInfoContext(connectCtx, credentials.ClientHandshakeInfo{Attributes: addr.Attributes}) + conn, err := dial(connectCtx, opts.Dialer, addr, opts.UseProxy, opts.UserAgent) if err != nil { if opts.FailOnNonTempDialError { @@ -237,11 +251,6 @@ func newHTTP2Client(connectCtx, ctx context.Context, addr resolver.Address, opts } } if transportCreds != nil { - // gRPC, resolver, balancer etc. can specify arbitrary data in the - // Attributes field of resolver.Address, which is shoved into connectCtx - // and passed to the credential handshaker. This makes it possible for - // address specific arbitrary data to reach the credential handshaker. - connectCtx = icredentials.NewClientHandshakeInfoContext(connectCtx, credentials.ClientHandshakeInfo{Attributes: addr.Attributes}) rawConn := conn // Pull the deadline from the connectCtx, which will be used for // timeouts in the authentication protocol handshake. 
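The dial helper above now distinguishes absolute and relative unix socket paths (abstract sockets prefixed with `"\x00"` are excluded) when handing the address back to a custom dialer: absolute paths become `unix://<path>` and relative ones `unix:<path>`. A minimal sketch of that decision; the function name is made up for illustration.

```go
package main

import (
	"fmt"
	"path/filepath"
)

// unixTarget rebuilds a dial target string from a resolved unix socket path.
func unixTarget(address string) string {
	if filepath.IsAbs(address) {
		return "unix://" + address // "unix://absolute-path"
	}
	return "unix:" + address // "unix:relative-path"
}

func main() {
	fmt.Println(unixTarget("/var/run/app.sock")) // unix:///var/run/app.sock
	fmt.Println(unixTarget("run/app.sock"))      // unix:run/app.sock
}
```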
Can ignore the @@ -342,8 +351,9 @@ func newHTTP2Client(connectCtx, ctx context.Context, addr resolver.Address, opts } t.statsHandler.HandleConn(t.ctx, connBegin) } - if channelz.IsOn() { - t.channelzID = channelz.RegisterNormalSocket(t, opts.ChannelzParentID, fmt.Sprintf("%s -> %s", t.localAddr, t.remoteAddr)) + t.channelzID, err = channelz.RegisterNormalSocket(t, opts.ChannelzParentID, fmt.Sprintf("%s -> %s", t.localAddr, t.remoteAddr)) + if err != nil { + return nil, err } if t.keepaliveEnabled { t.kpDormancyCond = sync.NewCond(&t.mu) @@ -579,7 +589,7 @@ func (t *http2Client) getTrAuthData(ctx context.Context, audience string) (map[s return nil, err } - return nil, status.Errorf(codes.Unauthenticated, "transport: %v", err) + return nil, status.Errorf(codes.Unauthenticated, "transport: per-RPC creds failed due to error: %v", err) } for k, v := range data { // Capital header names are illegal in HTTP/2. @@ -621,8 +631,8 @@ func (t *http2Client) getCallAuthData(ctx context.Context, audience string, call // the wire. However, there are two notable exceptions: // // 1. If the stream headers violate the max header list size allowed by the -// server. In this case there is no reason to retry at all, as it is -// assumed the RPC would continue to fail on subsequent attempts. +// server. It's possible this could succeed on another transport, even if +// it's unlikely, but do not transparently retry. // 2. If the credentials errored when requesting their headers. In this case, // it's possible a retry can fix the problem, but indefinitely transparently // retrying is not appropriate as it is likely the credentials, if they can @@ -630,8 +640,7 @@ func (t *http2Client) getCallAuthData(ctx context.Context, audience string, call type NewStreamError struct { Err error - DoNotRetry bool - DoNotTransparentRetry bool + AllowTransparentRetry bool } func (e NewStreamError) Error() string { @@ -640,11 +649,11 @@ func (e NewStreamError) Error() string { // NewStream creates a stream and registers it into the transport as "active" // streams. All non-nil errors returned will be *NewStreamError. -func (t *http2Client) NewStream(ctx context.Context, callHdr *CallHdr) (_ *Stream, err error) { +func (t *http2Client) NewStream(ctx context.Context, callHdr *CallHdr) (*Stream, error) { ctx = peer.NewContext(ctx, t.getPeer()) headerFields, err := t.createHeaderFields(ctx, callHdr) if err != nil { - return nil, &NewStreamError{Err: err, DoNotTransparentRetry: true} + return nil, &NewStreamError{Err: err, AllowTransparentRetry: false} } s := t.newStream(ctx, callHdr) cleanup := func(err error) { @@ -744,13 +753,14 @@ func (t *http2Client) NewStream(ctx context.Context, callHdr *CallHdr) (_ *Strea return true }, hdr) if err != nil { - return nil, &NewStreamError{Err: err} + // Connection closed. 
+ return nil, &NewStreamError{Err: err, AllowTransparentRetry: true} } if success { break } if hdrListSizeErr != nil { - return nil, &NewStreamError{Err: hdrListSizeErr, DoNotRetry: true} + return nil, &NewStreamError{Err: hdrListSizeErr} } firstTry = false select { @@ -758,9 +768,9 @@ func (t *http2Client) NewStream(ctx context.Context, callHdr *CallHdr) (_ *Strea case <-ctx.Done(): return nil, &NewStreamError{Err: ContextErr(ctx.Err())} case <-t.goAway: - return nil, &NewStreamError{Err: errStreamDrain} + return nil, &NewStreamError{Err: errStreamDrain, AllowTransparentRetry: true} case <-t.ctx.Done(): - return nil, &NewStreamError{Err: ErrConnClosing} + return nil, &NewStreamError{Err: ErrConnClosing, AllowTransparentRetry: true} } } if t.statsHandler != nil { @@ -889,9 +899,7 @@ func (t *http2Client) Close(err error) { t.controlBuf.finish() t.cancel() t.conn.Close() - if channelz.IsOn() { - channelz.RemoveEntry(t.channelzID) - } + channelz.RemoveEntry(t.channelzID) // Append info about previous goaways if there were any, since this may be important // for understanding the root cause for this connection to be closed. _, goAwayDebugMessage := t.GetGoAwayReason() @@ -1073,7 +1081,7 @@ func (t *http2Client) handleData(f *http2.DataFrame) { } // The server has closed the stream without sending trailers. Record that // the read direction is closed, and set the status appropriately. - if f.FrameHeader.Flags.Has(http2.FlagDataEndStream) { + if f.StreamEnded() { t.closeStream(s, io.EOF, false, http2.ErrCodeNo, status.New(codes.Internal, "server closed the stream without sending trailers"), nil, true) } } @@ -1403,26 +1411,6 @@ func (t *http2Client) operateHeaders(frame *http2.MetaHeadersFrame) { } isHeader := false - defer func() { - if t.statsHandler != nil { - if isHeader { - inHeader := &stats.InHeader{ - Client: true, - WireLength: int(frame.Header().Length), - Header: s.header.Copy(), - Compression: s.recvCompress, - } - t.statsHandler.HandleRPC(s.ctx, inHeader) - } else { - inTrailer := &stats.InTrailer{ - Client: true, - WireLength: int(frame.Header().Length), - Trailer: s.trailer.Copy(), - } - t.statsHandler.HandleRPC(s.ctx, inTrailer) - } - } - }() // If headerChan hasn't been closed yet if atomic.CompareAndSwapUint32(&s.headerChanClosed, 0, 1) { @@ -1444,6 +1432,25 @@ func (t *http2Client) operateHeaders(frame *http2.MetaHeadersFrame) { close(s.headerChan) } + if t.statsHandler != nil { + if isHeader { + inHeader := &stats.InHeader{ + Client: true, + WireLength: int(frame.Header().Length), + Header: metadata.MD(mdata).Copy(), + Compression: s.recvCompress, + } + t.statsHandler.HandleRPC(s.ctx, inHeader) + } else { + inTrailer := &stats.InTrailer{ + Client: true, + WireLength: int(frame.Header().Length), + Trailer: metadata.MD(mdata).Copy(), + } + t.statsHandler.HandleRPC(s.ctx, inTrailer) + } + } + if !endStream { return } @@ -1549,7 +1556,7 @@ func minTime(a, b time.Duration) time.Duration { return b } -// keepalive running in a separate goroutune makes sure the connection is alive by sending pings. +// keepalive running in a separate goroutine makes sure the connection is alive by sending pings. func (t *http2Client) keepalive() { p := &ping{data: [8]byte{}} // True iff a ping has been sent, and no data has been received since then. 
diff --git a/vendor/google.golang.org/grpc/internal/transport/http2_server.go b/vendor/google.golang.org/grpc/internal/transport/http2_server.go index 19c13e041..45d7bd145 100644 --- a/vendor/google.golang.org/grpc/internal/transport/http2_server.go +++ b/vendor/google.golang.org/grpc/internal/transport/http2_server.go @@ -21,7 +21,6 @@ package transport import ( "bytes" "context" - "errors" "fmt" "io" "math" @@ -36,6 +35,7 @@ import ( "golang.org/x/net/http2" "golang.org/x/net/http2/hpack" "google.golang.org/grpc/internal/grpcutil" + "google.golang.org/grpc/internal/syscall" "google.golang.org/grpc/codes" "google.golang.org/grpc/credentials" @@ -52,10 +52,10 @@ import ( var ( // ErrIllegalHeaderWrite indicates that setting header is illegal because of // the stream's state. - ErrIllegalHeaderWrite = errors.New("transport: the stream is done or WriteHeader was already called") + ErrIllegalHeaderWrite = status.Error(codes.Internal, "transport: SendHeader called multiple times") // ErrHeaderListSizeLimitViolation indicates that the header list size is larger // than the limit set by peer. - ErrHeaderListSizeLimitViolation = errors.New("transport: trying to send header list size larger than the limit set by peer") + ErrHeaderListSizeLimitViolation = status.Error(codes.Internal, "transport: trying to send header list size larger than the limit set by peer") ) // serverConnectionCounter counts the number of connections a server has seen @@ -73,7 +73,6 @@ type http2Server struct { writerDone chan struct{} // sync point to enable testing. remoteAddr net.Addr localAddr net.Addr - maxStreamID uint32 // max stream ID ever seen authInfo credentials.AuthInfo // auth info about the connection inTapHandle tap.ServerInHandle framer *framer @@ -118,18 +117,23 @@ type http2Server struct { idle time.Time // Fields below are for channelz metric collection. - channelzID int64 // channelz unique identification number + channelzID *channelz.Identifier czData *channelzData bufferPool *bufferPool connectionID uint64 + + // maxStreamMu guards the maximum stream ID + // This lock may not be taken if mu is already held. + maxStreamMu sync.Mutex + maxStreamID uint32 // max stream ID ever seen } // NewServerTransport creates a http2 transport with conn and configuration // options from config. // // It returns a non-nil transport and a nil error on success. On failure, it -// returns a non-nil transport and a nil-error. For a special case where the +// returns a nil transport and a non-nil error. For a special case where the // underlying conn gets closed before the client preface could be read, it // returns a nil transport and a nil error. 
func NewServerTransport(conn net.Conn, config *ServerConfig) (_ ServerTransport, err error) { @@ -227,6 +231,11 @@ func NewServerTransport(conn net.Conn, config *ServerConfig) (_ ServerTransport, if kp.Timeout == 0 { kp.Timeout = defaultServerKeepaliveTimeout } + if kp.Time != infinity { + if err = syscall.SetTCPUserTimeout(conn, kp.Timeout); err != nil { + return nil, connectionErrorf(false, err, "transport: failed to set TCP_USER_TIMEOUT: %v", err) + } + } kep := config.KeepalivePolicy if kep.MinTime == 0 { kep.MinTime = defaultKeepalivePolicyMinTime @@ -271,12 +280,12 @@ func NewServerTransport(conn net.Conn, config *ServerConfig) (_ ServerTransport, connBegin := &stats.ConnBegin{} t.stats.HandleConn(t.ctx, connBegin) } - if channelz.IsOn() { - t.channelzID = channelz.RegisterNormalSocket(t, config.ChannelzParentID, fmt.Sprintf("%s -> %s", t.remoteAddr, t.localAddr)) + t.channelzID, err = channelz.RegisterNormalSocket(t, config.ChannelzParentID, fmt.Sprintf("%s -> %s", t.remoteAddr, t.localAddr)) + if err != nil { + return nil, err } t.connectionID = atomic.AddUint64(&serverConnectionCounter, 1) - t.framer.writer.Flush() defer func() { @@ -290,10 +299,11 @@ func NewServerTransport(conn net.Conn, config *ServerConfig) (_ ServerTransport, if _, err := io.ReadFull(t.conn, preface); err != nil { // In deployments where a gRPC server runs behind a cloud load balancer // which performs regular TCP level health checks, the connection is - // closed immediately by the latter. Skipping the error here will help - // reduce log clutter. + // closed immediately by the latter. Returning io.EOF here allows the + // grpc server implementation to recognize this scenario and suppress + // logging to reduce spam. if err == io.EOF { - return nil, nil + return nil, io.EOF } return nil, connectionErrorf(false, err, "transport: http2Server.HandleStreams failed to receive the preface from client: %v", err) } @@ -333,6 +343,10 @@ func NewServerTransport(conn net.Conn, config *ServerConfig) (_ ServerTransport, // operateHeader takes action on the decoded headers. func (t *http2Server) operateHeaders(frame *http2.MetaHeadersFrame, handle func(*Stream), traceCtx func(context.Context, string) context.Context) (fatal bool) { + // Acquire max stream ID lock for entire duration + t.maxStreamMu.Lock() + defer t.maxStreamMu.Unlock() + streamID := frame.Header().StreamID // frame.Truncated is set to true when framer detects that the current header @@ -347,6 +361,15 @@ func (t *http2Server) operateHeaders(frame *http2.MetaHeadersFrame, handle func( return false } + if streamID%2 != 1 || streamID <= t.maxStreamID { + // illegal gRPC stream id. + if logger.V(logLevel) { + logger.Errorf("transport: http2Server.HandleStreams received an illegal stream id: %v", streamID) + } + return true + } + t.maxStreamID = streamID + buf := newRecvBuffer() s := &Stream{ id: streamID, @@ -354,7 +377,6 @@ func (t *http2Server) operateHeaders(frame *http2.MetaHeadersFrame, handle func( buf: buf, fc: &inFlow{limit: uint32(t.initialWindowSize)}, } - var ( // If a gRPC Response-Headers has already been received, then it means // that the peer is speaking gRPC and we are in gRPC mode. @@ -390,6 +412,13 @@ func (t *http2Server) operateHeaders(frame *http2.MetaHeadersFrame, handle func( if timeout, err = decodeTimeout(hf.Value); err != nil { headerError = true } + // "Transports must consider requests containing the Connection header + // as malformed." 
- A41 + case "connection": + if logger.V(logLevel) { + logger.Errorf("transport: http2Server.operateHeaders parsed a :connection header which makes a request malformed as per the HTTP/2 spec") + } + headerError = true default: if isReservedHeader(hf.Name) && !isWhitelistedHeader(hf.Name) { break @@ -404,6 +433,26 @@ func (t *http2Server) operateHeaders(frame *http2.MetaHeadersFrame, handle func( } } + // "If multiple Host headers or multiple :authority headers are present, the + // request must be rejected with an HTTP status code 400 as required by Host + // validation in RFC 7230 §5.4, gRPC status code INTERNAL, or RST_STREAM + // with HTTP/2 error code PROTOCOL_ERROR." - A41. Since this is a HTTP/2 + // error, this takes precedence over a client not speaking gRPC. + if len(mdata[":authority"]) > 1 || len(mdata["host"]) > 1 { + errMsg := fmt.Sprintf("num values of :authority: %v, num values of host: %v, both must only have 1 value as per HTTP/2 spec", len(mdata[":authority"]), len(mdata["host"])) + if logger.V(logLevel) { + logger.Errorf("transport: %v", errMsg) + } + t.controlBuf.put(&earlyAbortStream{ + httpStatus: 400, + streamID: streamID, + contentSubtype: s.contentSubtype, + status: status.New(codes.Internal, errMsg), + rst: !frame.StreamEnded(), + }) + return false + } + if !isGRPC || headerError { t.controlBuf.put(&cleanupStream{ streamID: streamID, @@ -414,6 +463,19 @@ func (t *http2Server) operateHeaders(frame *http2.MetaHeadersFrame, handle func( return false } + // "If :authority is missing, Host must be renamed to :authority." - A41 + if len(mdata[":authority"]) == 0 { + // No-op if host isn't present, no eventual :authority header is a valid + // RPC. + if host, ok := mdata["host"]; ok { + mdata[":authority"] = host + delete(mdata, "host") + } + } else { + // "If :authority is present, Host must be discarded" - A41 + delete(mdata, "host") + } + if frame.StreamEnded() { // s is just created by the caller. No lock needed. s.state = streamReadDone @@ -458,26 +520,18 @@ func (t *http2Server) operateHeaders(frame *http2.MetaHeadersFrame, handle func( s.cancel() return false } - if streamID%2 != 1 || streamID <= t.maxStreamID { - t.mu.Unlock() - // illegal gRPC stream id. 
- if logger.V(logLevel) { - logger.Errorf("transport: http2Server.HandleStreams received an illegal stream id: %v", streamID) - } - s.cancel() - return true - } - t.maxStreamID = streamID if httpMethod != http.MethodPost { t.mu.Unlock() + errMsg := fmt.Sprintf("http2Server.operateHeaders parsed a :method field: %v which should be POST", httpMethod) if logger.V(logLevel) { - logger.Infof("transport: http2Server.operateHeaders parsed a :method field: %v which should be POST", httpMethod) + logger.Infof("transport: %v", errMsg) } - t.controlBuf.put(&cleanupStream{ - streamID: streamID, - rst: true, - rstCode: http2.ErrCodeProtocol, - onWrite: func() {}, + t.controlBuf.put(&earlyAbortStream{ + httpStatus: 405, + streamID: streamID, + contentSubtype: s.contentSubtype, + status: status.New(codes.Internal, errMsg), + rst: !frame.StreamEnded(), }) s.cancel() return false @@ -494,9 +548,11 @@ func (t *http2Server) operateHeaders(frame *http2.MetaHeadersFrame, handle func( stat = status.New(codes.PermissionDenied, err.Error()) } t.controlBuf.put(&earlyAbortStream{ + httpStatus: 200, streamID: s.id, contentSubtype: s.contentSubtype, status: stat, + rst: !frame.StreamEnded(), }) return false } @@ -734,7 +790,7 @@ func (t *http2Server) handleData(f *http2.DataFrame) { s.write(recvMsg{buffer: buffer}) } } - if f.Header().Flags.Has(http2.FlagDataEndStream) { + if f.StreamEnded() { // Received the end of stream from the client. s.compareAndSwapState(streamActive, streamReadDone) s.write(recvMsg{err: io.EOF}) @@ -878,11 +934,25 @@ func (t *http2Server) checkForHeaderListSize(it interface{}) bool { return true } +func (t *http2Server) streamContextErr(s *Stream) error { + select { + case <-t.done: + return ErrConnClosing + default: + } + return ContextErr(s.ctx.Err()) +} + // WriteHeader sends the header metadata md back to the client. func (t *http2Server) WriteHeader(s *Stream, md metadata.MD) error { - if s.updateHeaderSent() || s.getState() == streamDone { + if s.updateHeaderSent() { return ErrIllegalHeaderWrite } + + if s.getState() == streamDone { + return t.streamContextErr(s) + } + s.hdrMu.Lock() if md.Len() > 0 { if s.header.Len() > 0 { @@ -893,7 +963,7 @@ func (t *http2Server) WriteHeader(s *Stream, md metadata.MD) error { } if err := t.writeHeaderLocked(s); err != nil { s.hdrMu.Unlock() - return err + return status.Convert(err).Err() } s.hdrMu.Unlock() return nil @@ -1009,23 +1079,12 @@ func (t *http2Server) WriteStatus(s *Stream, st *status.Status) error { func (t *http2Server) Write(s *Stream, hdr []byte, data []byte, opts *Options) error { if !s.isHeaderSent() { // Headers haven't been written yet. if err := t.WriteHeader(s, nil); err != nil { - if _, ok := err.(ConnectionError); ok { - return err - } - // TODO(mmukhi, dfawley): Make sure this is the right code to return. - return status.Errorf(codes.Internal, "transport: %v", err) + return err } } else { // Writing headers checks for this condition. if s.getState() == streamDone { - // TODO(mmukhi, dfawley): Should the server write also return io.EOF? 
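Among the `operateHeaders` changes above, gRFC A41 requires rejecting requests that carry multiple `:authority` or `host` values with HTTP status 400, and otherwise folding `host` into `:authority` when the latter is absent. Below is a rough sketch of that normalization over a plain header map; it mirrors the rules described in the hunks, not the transport's actual data structures.

```go
package main

import (
	"errors"
	"fmt"
)

// normalizeAuthority applies the Host/:authority rules described above to a
// parsed header map in which each key may carry multiple values.
func normalizeAuthority(md map[string][]string) error {
	if len(md[":authority"]) > 1 || len(md["host"]) > 1 {
		return errors.New("multiple :authority or host values; reject with HTTP status 400")
	}
	if len(md[":authority"]) == 0 {
		// ":authority" is missing: Host must be renamed to ":authority".
		if host, ok := md["host"]; ok {
			md[":authority"] = host
			delete(md, "host")
		}
	} else {
		// ":authority" is present: Host must be discarded.
		delete(md, "host")
	}
	return nil
}

func main() {
	md := map[string][]string{"host": {"example.com"}}
	fmt.Println(normalizeAuthority(md), md) // <nil> map[:authority:[example.com]]
}
```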
- s.cancel() - select { - case <-t.done: - return ErrConnClosing - default: - } - return ContextErr(s.ctx.Err()) + return t.streamContextErr(s) } } df := &dataFrame{ @@ -1035,12 +1094,7 @@ func (t *http2Server) Write(s *Stream, hdr []byte, data []byte, opts *Options) e onEachWrite: t.setResetPingStrikes, } if err := s.wq.get(int32(len(hdr) + len(data))); err != nil { - select { - case <-t.done: - return ErrConnClosing - default: - } - return ContextErr(s.ctx.Err()) + return t.streamContextErr(s) } return t.controlBuf.put(df) } @@ -1163,9 +1217,7 @@ func (t *http2Server) Close() { if err := t.conn.Close(); err != nil && logger.V(logLevel) { logger.Infof("transport: error closing conn during Close: %v", err) } - if channelz.IsOn() { - channelz.RemoveEntry(t.channelzID) - } + channelz.RemoveEntry(t.channelzID) // Cancel all active streams. for _, s := range streams { s.cancel() @@ -1178,10 +1230,6 @@ func (t *http2Server) Close() { // deleteStream deletes the stream s from transport's active streams. func (t *http2Server) deleteStream(s *Stream, eosReceived bool) { - // In case stream sending and receiving are invoked in separate - // goroutines (e.g., bi-directional streaming), cancel needs to be - // called to interrupt the potential blocking on other goroutines. - s.cancel() t.mu.Lock() if _, ok := t.activeStreams[s.id]; ok { @@ -1203,6 +1251,11 @@ func (t *http2Server) deleteStream(s *Stream, eosReceived bool) { // finishStream closes the stream and puts the trailing headerFrame into controlbuf. func (t *http2Server) finishStream(s *Stream, rst bool, rstCode http2.ErrCode, hdr *headerFrame, eosReceived bool) { + // In case stream sending and receiving are invoked in separate + // goroutines (e.g., bi-directional streaming), cancel needs to be + // called to interrupt the potential blocking on other goroutines. + s.cancel() + oldState := s.swapState(streamDone) if oldState == streamDone { // If the stream was already done, return. @@ -1222,6 +1275,11 @@ func (t *http2Server) finishStream(s *Stream, rst bool, rstCode http2.ErrCode, h // closeStream clears the footprint of a stream when the stream is not needed any more. func (t *http2Server) closeStream(s *Stream, rst bool, rstCode http2.ErrCode, eosReceived bool) { + // In case stream sending and receiving are invoked in separate + // goroutines (e.g., bi-directional streaming), cancel needs to be + // called to interrupt the potential blocking on other goroutines. + s.cancel() + s.swapState(streamDone) t.deleteStream(s, eosReceived) @@ -1252,20 +1310,23 @@ var goAwayPing = &ping{data: [8]byte{1, 6, 1, 8, 0, 3, 3, 9}} // Handles outgoing GoAway and returns true if loopy needs to put itself // in draining mode. func (t *http2Server) outgoingGoAwayHandler(g *goAway) (bool, error) { + t.maxStreamMu.Lock() t.mu.Lock() if t.state == closing { // TODO(mmukhi): This seems unnecessary. t.mu.Unlock() + t.maxStreamMu.Unlock() // The transport is closing. return false, ErrConnClosing } - sid := t.maxStreamID if !g.headsUp { // Stop accepting more streams now. 
t.state = draining + sid := t.maxStreamID if len(t.activeStreams) == 0 { g.closeConn = true } t.mu.Unlock() + t.maxStreamMu.Unlock() if err := t.framer.fr.WriteGoAway(sid, g.code, g.debugData); err != nil { return false, err } @@ -1278,6 +1339,7 @@ func (t *http2Server) outgoingGoAwayHandler(g *goAway) (bool, error) { return true, nil } t.mu.Unlock() + t.maxStreamMu.Unlock() // For a graceful close, send out a GoAway with stream ID of MaxUInt32, // Follow that with a ping and wait for the ack to come back or a timer // to expire. During this time accept new streams since they might have diff --git a/vendor/google.golang.org/grpc/internal/transport/networktype/networktype.go b/vendor/google.golang.org/grpc/internal/transport/networktype/networktype.go index 7bb53cff1..c11b52782 100644 --- a/vendor/google.golang.org/grpc/internal/transport/networktype/networktype.go +++ b/vendor/google.golang.org/grpc/internal/transport/networktype/networktype.go @@ -31,7 +31,7 @@ const key = keyType("grpc.internal.transport.networktype") // Set returns a copy of the provided address with attributes containing networkType. func Set(address resolver.Address, networkType string) resolver.Address { - address.Attributes = address.Attributes.WithValues(key, networkType) + address.Attributes = address.Attributes.WithValue(key, networkType) return address } diff --git a/vendor/google.golang.org/grpc/internal/transport/proxy.go b/vendor/google.golang.org/grpc/internal/transport/proxy.go index a662bf39a..415961987 100644 --- a/vendor/google.golang.org/grpc/internal/transport/proxy.go +++ b/vendor/google.golang.org/grpc/internal/transport/proxy.go @@ -37,7 +37,7 @@ var ( httpProxyFromEnvironment = http.ProxyFromEnvironment ) -func mapAddress(ctx context.Context, address string) (*url.URL, error) { +func mapAddress(address string) (*url.URL, error) { req := &http.Request{ URL: &url.URL{ Scheme: "https", @@ -114,7 +114,7 @@ func doHTTPConnectHandshake(ctx context.Context, conn net.Conn, backendAddr stri // connection. func proxyDial(ctx context.Context, addr string, grpcUA string) (conn net.Conn, err error) { newAddr := addr - proxyURL, err := mapAddress(ctx, addr) + proxyURL, err := mapAddress(addr) if err != nil { return nil, err } diff --git a/vendor/google.golang.org/grpc/internal/transport/transport.go b/vendor/google.golang.org/grpc/internal/transport/transport.go index d3bf65b2b..a9ce717f1 100644 --- a/vendor/google.golang.org/grpc/internal/transport/transport.go +++ b/vendor/google.golang.org/grpc/internal/transport/transport.go @@ -34,6 +34,7 @@ import ( "google.golang.org/grpc/codes" "google.golang.org/grpc/credentials" + "google.golang.org/grpc/internal/channelz" "google.golang.org/grpc/keepalive" "google.golang.org/grpc/metadata" "google.golang.org/grpc/resolver" @@ -529,7 +530,7 @@ type ServerConfig struct { InitialConnWindowSize int32 WriteBufferSize int ReadBufferSize int - ChannelzParentID int64 + ChannelzParentID *channelz.Identifier MaxHeaderListSize *uint32 HeaderTableSize *uint32 } @@ -563,7 +564,7 @@ type ConnectOptions struct { // ReadBufferSize sets the size of read buffer, which in turn determines how much data can be read at most for one read syscall. ReadBufferSize int // ChannelzParentID sets the addrConn id which initiate the creation of this client transport. - ChannelzParentID int64 + ChannelzParentID *channelz.Identifier // MaxHeaderListSize sets the max (uncompressed) size of header list that is prepared to be received. 
MaxHeaderListSize *uint32 // UseProxy specifies if a proxy should be used. @@ -741,6 +742,12 @@ func (e ConnectionError) Origin() error { return e.err } +// Unwrap returns the original error of this connection error or nil when the +// origin is nil. +func (e ConnectionError) Unwrap() error { + return e.err +} + var ( // ErrConnClosing indicates that the transport is closing. ErrConnClosing = connectionErrorf(true, nil, "transport is closing") diff --git a/vendor/google.golang.org/grpc/internal/xds/env/env.go b/vendor/google.golang.org/grpc/internal/xds/env/env.go deleted file mode 100644 index b171ac91f..000000000 --- a/vendor/google.golang.org/grpc/internal/xds/env/env.go +++ /dev/null @@ -1,95 +0,0 @@ -/* - * - * Copyright 2020 gRPC authors. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -// Package env acts a single source of definition for all environment variables -// related to the xDS implementation in gRPC. -package env - -import ( - "os" - "strings" -) - -const ( - // BootstrapFileNameEnv is the env variable to set bootstrap file name. - // Do not use this and read from env directly. Its value is read and kept in - // variable BootstrapFileName. - // - // When both bootstrap FileName and FileContent are set, FileName is used. - BootstrapFileNameEnv = "GRPC_XDS_BOOTSTRAP" - // BootstrapFileContentEnv is the env variable to set bootstrapp file - // content. Do not use this and read from env directly. Its value is read - // and kept in variable BootstrapFileName. - // - // When both bootstrap FileName and FileContent are set, FileName is used. - BootstrapFileContentEnv = "GRPC_XDS_BOOTSTRAP_CONFIG" - - ringHashSupportEnv = "GRPC_XDS_EXPERIMENTAL_ENABLE_RING_HASH" - clientSideSecuritySupportEnv = "GRPC_XDS_EXPERIMENTAL_SECURITY_SUPPORT" - aggregateAndDNSSupportEnv = "GRPC_XDS_EXPERIMENTAL_ENABLE_AGGREGATE_AND_LOGICAL_DNS_CLUSTER" - retrySupportEnv = "GRPC_XDS_EXPERIMENTAL_ENABLE_RETRY" - rbacSupportEnv = "GRPC_XDS_EXPERIMENTAL_ENABLE_RBAC" - - c2pResolverSupportEnv = "GRPC_EXPERIMENTAL_GOOGLE_C2P_RESOLVER" - c2pResolverTestOnlyTrafficDirectorURIEnv = "GRPC_TEST_ONLY_GOOGLE_C2P_RESOLVER_TRAFFIC_DIRECTOR_URI" -) - -var ( - // BootstrapFileName holds the name of the file which contains xDS bootstrap - // configuration. Users can specify the location of the bootstrap file by - // setting the environment variable "GRPC_XDS_BOOTSTRAP". - // - // When both bootstrap FileName and FileContent are set, FileName is used. - BootstrapFileName = os.Getenv(BootstrapFileNameEnv) - // BootstrapFileContent holds the content of the xDS bootstrap - // configuration. Users can specify the bootstrap config by - // setting the environment variable "GRPC_XDS_BOOTSTRAP_CONFIG". - // - // When both bootstrap FileName and FileContent are set, FileName is used. 
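The new `Unwrap` method on `ConnectionError` above lets callers use `errors.Is`/`errors.As` to look through a transport error at its underlying cause. The sketch below shows the general wrapper pattern with a stand-in error type; it does not use the internal transport package itself.

```go
package main

import (
	"errors"
	"fmt"
	"os"
)

// connError is a stand-in for a wrapper error that records an origin error.
type connError struct {
	desc string
	err  error
}

func (e connError) Error() string { return "connection error: " + e.desc }

// Unwrap exposes the origin so errors.Is / errors.As can inspect it.
func (e connError) Unwrap() error { return e.err }

func main() {
	wrapped := connError{desc: "write failed", err: os.ErrDeadlineExceeded}
	fmt.Println(errors.Is(wrapped, os.ErrDeadlineExceeded)) // true, thanks to Unwrap
}
```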
- BootstrapFileContent = os.Getenv(BootstrapFileContentEnv) - // RingHashSupport indicates whether ring hash support is enabled, which can - // be disabled by setting the environment variable - // "GRPC_XDS_EXPERIMENTAL_ENABLE_RING_HASH" to "false". - RingHashSupport = !strings.EqualFold(os.Getenv(ringHashSupportEnv), "false") - // ClientSideSecuritySupport is used to control processing of security - // configuration on the client-side. - // - // Note that there is no env var protection for the server-side because we - // have a brand new API on the server-side and users explicitly need to use - // the new API to get security integration on the server. - ClientSideSecuritySupport = !strings.EqualFold(os.Getenv(clientSideSecuritySupportEnv), "false") - // AggregateAndDNSSupportEnv indicates whether processing of aggregated - // cluster and DNS cluster is enabled, which can be enabled by setting the - // environment variable - // "GRPC_XDS_EXPERIMENTAL_ENABLE_AGGREGATE_AND_LOGICAL_DNS_CLUSTER" to - // "true". - AggregateAndDNSSupportEnv = strings.EqualFold(os.Getenv(aggregateAndDNSSupportEnv), "true") - - // RetrySupport indicates whether xDS retry is enabled. - RetrySupport = !strings.EqualFold(os.Getenv(retrySupportEnv), "false") - - // RBACSupport indicates whether xDS configured RBAC HTTP Filter is enabled. - RBACSupport = strings.EqualFold(os.Getenv(rbacSupportEnv), "true") - - // C2PResolverSupport indicates whether support for C2P resolver is enabled. - // This can be enabled by setting the environment variable - // "GRPC_EXPERIMENTAL_GOOGLE_C2P_RESOLVER" to "true". - C2PResolverSupport = strings.EqualFold(os.Getenv(c2pResolverSupportEnv), "true") - // C2PResolverTestOnlyTrafficDirectorURI is the TD URI for testing. - C2PResolverTestOnlyTrafficDirectorURI = os.Getenv(c2pResolverTestOnlyTrafficDirectorURIEnv) -) diff --git a/vendor/google.golang.org/grpc/internal/xds_handshake_cluster.go b/vendor/google.golang.org/grpc/internal/xds_handshake_cluster.go index 3677c3f04..e8b492774 100644 --- a/vendor/google.golang.org/grpc/internal/xds_handshake_cluster.go +++ b/vendor/google.golang.org/grpc/internal/xds_handshake_cluster.go @@ -28,7 +28,7 @@ type handshakeClusterNameKey struct{} // SetXDSHandshakeClusterName returns a copy of addr in which the Attributes field // is updated with the cluster name. func SetXDSHandshakeClusterName(addr resolver.Address, clusterName string) resolver.Address { - addr.Attributes = addr.Attributes.WithValues(handshakeClusterNameKey{}, clusterName) + addr.Attributes = addr.Attributes.WithValue(handshakeClusterNameKey{}, clusterName) return addr } diff --git a/vendor/google.golang.org/grpc/metadata/metadata.go b/vendor/google.golang.org/grpc/metadata/metadata.go index 3604c7819..8e0f6abe8 100644 --- a/vendor/google.golang.org/grpc/metadata/metadata.go +++ b/vendor/google.golang.org/grpc/metadata/metadata.go @@ -188,7 +188,9 @@ func FromIncomingContext(ctx context.Context) (MD, bool) { // map, and there's no guarantee that the MD attached to the context is // created using our helper functions. key := strings.ToLower(k) - out[key] = v + s := make([]string, len(v)) + copy(s, v) + out[key] = s } return out, true } @@ -226,7 +228,9 @@ func FromOutgoingContext(ctx context.Context) (MD, bool) { // map, and there's no guarantee that the MD attached to the context is // created using our helper functions. 
key := strings.ToLower(k) - out[key] = v + s := make([]string, len(v)) + copy(s, v) + out[key] = s } for _, added := range raw.added { if len(added)%2 == 1 { diff --git a/vendor/google.golang.org/grpc/picker_wrapper.go b/vendor/google.golang.org/grpc/picker_wrapper.go index 0878ada9d..843633c91 100644 --- a/vendor/google.golang.org/grpc/picker_wrapper.go +++ b/vendor/google.golang.org/grpc/picker_wrapper.go @@ -131,7 +131,7 @@ func (pw *pickerWrapper) pick(ctx context.Context, failfast bool, info balancer. } if _, ok := status.FromError(err); ok { // Status error: end the RPC unconditionally with this status. - return nil, nil, err + return nil, nil, dropError{error: err} } // For all other errors, wait for ready RPCs should block and other // RPCs should fail with unavailable. @@ -144,7 +144,7 @@ func (pw *pickerWrapper) pick(ctx context.Context, failfast bool, info balancer. acw, ok := pickResult.SubConn.(*acBalancerWrapper) if !ok { - logger.Error("subconn returned from pick is not *acBalancerWrapper") + logger.Errorf("subconn returned from pick is type %T, not *acBalancerWrapper", pickResult.SubConn) continue } if t := acw.getAddrConn().getReadyTransport(); t != nil { @@ -175,3 +175,9 @@ func (pw *pickerWrapper) close() { pw.done = true close(pw.blockingCh) } + +// dropError is a wrapper error that indicates the LB policy wishes to drop the +// RPC and not retry it. +type dropError struct { + error +} diff --git a/vendor/google.golang.org/grpc/pickfirst.go b/vendor/google.golang.org/grpc/pickfirst.go index f194d14a0..fb7a99e0a 100644 --- a/vendor/google.golang.org/grpc/pickfirst.go +++ b/vendor/google.golang.org/grpc/pickfirst.go @@ -44,79 +44,107 @@ func (*pickfirstBuilder) Name() string { } type pickfirstBalancer struct { - state connectivity.State - cc balancer.ClientConn - sc balancer.SubConn + state connectivity.State + cc balancer.ClientConn + subConn balancer.SubConn } func (b *pickfirstBalancer) ResolverError(err error) { - switch b.state { - case connectivity.TransientFailure, connectivity.Idle, connectivity.Connecting: - // Set a failing picker if we don't have a good picker. - b.cc.UpdateState(balancer.State{ConnectivityState: connectivity.TransientFailure, - Picker: &picker{err: fmt.Errorf("name resolver error: %v", err)}, - }) - } if logger.V(2) { logger.Infof("pickfirstBalancer: ResolverError called with error %v", err) } + if b.subConn == nil { + b.state = connectivity.TransientFailure + } + + if b.state != connectivity.TransientFailure { + // The picker will not change since the balancer does not currently + // report an error. + return + } + b.cc.UpdateState(balancer.State{ + ConnectivityState: connectivity.TransientFailure, + Picker: &picker{err: fmt.Errorf("name resolver error: %v", err)}, + }) } -func (b *pickfirstBalancer) UpdateClientConnState(cs balancer.ClientConnState) error { - if len(cs.ResolverState.Addresses) == 0 { +func (b *pickfirstBalancer) UpdateClientConnState(state balancer.ClientConnState) error { + if len(state.ResolverState.Addresses) == 0 { + // The resolver reported an empty address list. Treat it like an error by + // calling b.ResolverError. + if b.subConn != nil { + // Remove the old subConn. All addresses were removed, so it is no longer + // valid. 
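The metadata changes above (`FromIncomingContext`/`FromOutgoingContext`) now copy each value slice instead of aliasing the slice stored in the context, so a caller that mutates the returned MD cannot corrupt the shared copy. A small illustration of why the copy matters, using a plain map of string slices:

```go
package main

import "fmt"

// snapshot returns a copy of md whose value slices are safe for the caller to mutate.
func snapshot(md map[string][]string) map[string][]string {
	out := make(map[string][]string, len(md))
	for k, v := range md {
		s := make([]string, len(v))
		copy(s, v) // copy the backing array, not just the slice header
		out[k] = s
	}
	return out
}

func main() {
	shared := map[string][]string{"authorization": {"token-1"}}
	got := snapshot(shared)
	got["authorization"][0] = "mutated"
	fmt.Println(shared["authorization"][0]) // still "token-1": the original is untouched
}
```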
+ b.cc.RemoveSubConn(b.subConn) + b.subConn = nil + } b.ResolverError(errors.New("produced zero addresses")) return balancer.ErrBadResolverState } - if b.sc == nil { - var err error - b.sc, err = b.cc.NewSubConn(cs.ResolverState.Addresses, balancer.NewSubConnOptions{}) - if err != nil { - if logger.V(2) { - logger.Errorf("pickfirstBalancer: failed to NewSubConn: %v", err) - } - b.state = connectivity.TransientFailure - b.cc.UpdateState(balancer.State{ConnectivityState: connectivity.TransientFailure, - Picker: &picker{err: fmt.Errorf("error creating connection: %v", err)}, - }) - return balancer.ErrBadResolverState + + if b.subConn != nil { + b.cc.UpdateAddresses(b.subConn, state.ResolverState.Addresses) + return nil + } + + subConn, err := b.cc.NewSubConn(state.ResolverState.Addresses, balancer.NewSubConnOptions{}) + if err != nil { + if logger.V(2) { + logger.Errorf("pickfirstBalancer: failed to NewSubConn: %v", err) } - b.state = connectivity.Idle - b.cc.UpdateState(balancer.State{ConnectivityState: connectivity.Idle, Picker: &picker{result: balancer.PickResult{SubConn: b.sc}}}) - b.sc.Connect() - } else { - b.cc.UpdateAddresses(b.sc, cs.ResolverState.Addresses) - b.sc.Connect() + b.state = connectivity.TransientFailure + b.cc.UpdateState(balancer.State{ + ConnectivityState: connectivity.TransientFailure, + Picker: &picker{err: fmt.Errorf("error creating connection: %v", err)}, + }) + return balancer.ErrBadResolverState } + b.subConn = subConn + b.state = connectivity.Idle + b.cc.UpdateState(balancer.State{ + ConnectivityState: connectivity.Idle, + Picker: &picker{result: balancer.PickResult{SubConn: b.subConn}}, + }) + b.subConn.Connect() return nil } -func (b *pickfirstBalancer) UpdateSubConnState(sc balancer.SubConn, s balancer.SubConnState) { +func (b *pickfirstBalancer) UpdateSubConnState(subConn balancer.SubConn, state balancer.SubConnState) { if logger.V(2) { - logger.Infof("pickfirstBalancer: UpdateSubConnState: %p, %v", sc, s) + logger.Infof("pickfirstBalancer: UpdateSubConnState: %p, %v", subConn, state) } - if b.sc != sc { + if b.subConn != subConn { if logger.V(2) { - logger.Infof("pickfirstBalancer: ignored state change because sc is not recognized") + logger.Infof("pickfirstBalancer: ignored state change because subConn is not recognized") } return } - b.state = s.ConnectivityState - if s.ConnectivityState == connectivity.Shutdown { - b.sc = nil + b.state = state.ConnectivityState + if state.ConnectivityState == connectivity.Shutdown { + b.subConn = nil return } - switch s.ConnectivityState { + switch state.ConnectivityState { case connectivity.Ready: - b.cc.UpdateState(balancer.State{ConnectivityState: s.ConnectivityState, Picker: &picker{result: balancer.PickResult{SubConn: sc}}}) + b.cc.UpdateState(balancer.State{ + ConnectivityState: state.ConnectivityState, + Picker: &picker{result: balancer.PickResult{SubConn: subConn}}, + }) case connectivity.Connecting: - b.cc.UpdateState(balancer.State{ConnectivityState: s.ConnectivityState, Picker: &picker{err: balancer.ErrNoSubConnAvailable}}) + b.cc.UpdateState(balancer.State{ + ConnectivityState: state.ConnectivityState, + Picker: &picker{err: balancer.ErrNoSubConnAvailable}, + }) case connectivity.Idle: - b.cc.UpdateState(balancer.State{ConnectivityState: s.ConnectivityState, Picker: &idlePicker{sc: sc}}) + b.cc.UpdateState(balancer.State{ + ConnectivityState: state.ConnectivityState, + Picker: &idlePicker{subConn: subConn}, + }) case connectivity.TransientFailure: b.cc.UpdateState(balancer.State{ - ConnectivityState: 
s.ConnectivityState, - Picker: &picker{err: s.ConnectionError}, + ConnectivityState: state.ConnectivityState, + Picker: &picker{err: state.ConnectionError}, }) } } @@ -125,8 +153,8 @@ func (b *pickfirstBalancer) Close() { } func (b *pickfirstBalancer) ExitIdle() { - if b.state == connectivity.Idle { - b.sc.Connect() + if b.subConn != nil && b.state == connectivity.Idle { + b.subConn.Connect() } } @@ -135,18 +163,18 @@ type picker struct { err error } -func (p *picker) Pick(info balancer.PickInfo) (balancer.PickResult, error) { +func (p *picker) Pick(balancer.PickInfo) (balancer.PickResult, error) { return p.result, p.err } // idlePicker is used when the SubConn is IDLE and kicks the SubConn into // CONNECTING when Pick is called. type idlePicker struct { - sc balancer.SubConn + subConn balancer.SubConn } -func (i *idlePicker) Pick(info balancer.PickInfo) (balancer.PickResult, error) { - i.sc.Connect() +func (i *idlePicker) Pick(balancer.PickInfo) (balancer.PickResult, error) { + i.subConn.Connect() return balancer.PickResult{}, balancer.ErrNoSubConnAvailable } diff --git a/vendor/google.golang.org/grpc/regenerate.sh b/vendor/google.golang.org/grpc/regenerate.sh index dfd3226a1..978b89f37 100644 --- a/vendor/google.golang.org/grpc/regenerate.sh +++ b/vendor/google.golang.org/grpc/regenerate.sh @@ -27,9 +27,9 @@ export PATH=${GOBIN}:${PATH} mkdir -p ${GOBIN} echo "remove existing generated files" -# grpc_testingv3/testv3.pb.go is not re-generated because it was -# intentionally generated by an older version of protoc-gen-go. -rm -f $(find . -name '*.pb.go' | grep -v 'grpc_testingv3/testv3.pb.go') +# grpc_testing_not_regenerate/*.pb.go is not re-generated, +# see grpc_testing_not_regenerate/README.md for details. +rm -f $(find . -name '*.pb.go' | grep -v 'grpc_testing_not_regenerate') echo "go install google.golang.org/protobuf/cmd/protoc-gen-go" (cd test/tools && go install google.golang.org/protobuf/cmd/protoc-gen-go) @@ -76,7 +76,21 @@ SOURCES=( # These options of the form 'Mfoo.proto=bar' instruct the codegen to use an # import path of 'bar' in the generated code when 'foo.proto' is imported in # one of the sources. -OPTS=Mgrpc/service_config/service_config.proto=/internal/proto/grpc_service_config,Mgrpc/core/stats.proto=google.golang.org/grpc/interop/grpc_testing/core +# +# Note that the protos listed here are all for testing purposes. All protos to +# be used externally should have a go_package option (and they don't need to be +# listed here). 
+OPTS=Mgrpc/service_config/service_config.proto=/internal/proto/grpc_service_config,\ +Mgrpc/core/stats.proto=google.golang.org/grpc/interop/grpc_testing/core,\ +Mgrpc/testing/benchmark_service.proto=google.golang.org/grpc/interop/grpc_testing,\ +Mgrpc/testing/stats.proto=google.golang.org/grpc/interop/grpc_testing,\ +Mgrpc/testing/report_qps_scenario_service.proto=google.golang.org/grpc/interop/grpc_testing,\ +Mgrpc/testing/messages.proto=google.golang.org/grpc/interop/grpc_testing,\ +Mgrpc/testing/worker_service.proto=google.golang.org/grpc/interop/grpc_testing,\ +Mgrpc/testing/control.proto=google.golang.org/grpc/interop/grpc_testing,\ +Mgrpc/testing/test.proto=google.golang.org/grpc/interop/grpc_testing,\ +Mgrpc/testing/payloads.proto=google.golang.org/grpc/interop/grpc_testing,\ +Mgrpc/testing/empty.proto=google.golang.org/grpc/interop/grpc_testing for src in ${SOURCES[@]}; do echo "protoc ${src}" @@ -85,7 +99,6 @@ for src in ${SOURCES[@]}; do -I${WORKDIR}/grpc-proto \ -I${WORKDIR}/googleapis \ -I${WORKDIR}/protobuf/src \ - -I${WORKDIR}/istio \ ${src} done @@ -96,18 +109,17 @@ for src in ${LEGACY_SOURCES[@]}; do -I${WORKDIR}/grpc-proto \ -I${WORKDIR}/googleapis \ -I${WORKDIR}/protobuf/src \ - -I${WORKDIR}/istio \ ${src} done # The go_package option in grpc/lookup/v1/rls.proto doesn't match the # current location. Move it into the right place. -mkdir -p ${WORKDIR}/out/google.golang.org/grpc/balancer/rls/internal/proto/grpc_lookup_v1 -mv ${WORKDIR}/out/google.golang.org/grpc/lookup/grpc_lookup_v1/* ${WORKDIR}/out/google.golang.org/grpc/balancer/rls/internal/proto/grpc_lookup_v1 +mkdir -p ${WORKDIR}/out/google.golang.org/grpc/internal/proto/grpc_lookup_v1 +mv ${WORKDIR}/out/google.golang.org/grpc/lookup/grpc_lookup_v1/* ${WORKDIR}/out/google.golang.org/grpc/internal/proto/grpc_lookup_v1 -# grpc_testingv3/testv3.pb.go is not re-generated because it was -# intentionally generated by an older version of protoc-gen-go. -rm ${WORKDIR}/out/google.golang.org/grpc/reflection/grpc_testingv3/*.pb.go +# grpc_testing_not_regenerate/*.pb.go are not re-generated, +# see grpc_testing_not_regenerate/README.md for details. +rm ${WORKDIR}/out/google.golang.org/grpc/reflection/grpc_testing_not_regenerate/*.pb.go # grpc/service_config/service_config.proto does not have a go_package option. mv ${WORKDIR}/out/grpc/service_config/service_config.pb.go internal/proto/grpc_service_config diff --git a/vendor/google.golang.org/grpc/resolver/map.go b/vendor/google.golang.org/grpc/resolver/map.go new file mode 100644 index 000000000..e87ecd0ee --- /dev/null +++ b/vendor/google.golang.org/grpc/resolver/map.go @@ -0,0 +1,109 @@ +/* + * + * Copyright 2021 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package resolver + +type addressMapEntry struct { + addr Address + value interface{} +} + +// AddressMap is a map of addresses to arbitrary values taking into account +// Attributes. BalancerAttributes are ignored, as are Metadata and Type. 
+// Multiple accesses may not be performed concurrently. Must be created via +// NewAddressMap; do not construct directly. +type AddressMap struct { + m map[string]addressMapEntryList +} + +type addressMapEntryList []*addressMapEntry + +// NewAddressMap creates a new AddressMap. +func NewAddressMap() *AddressMap { + return &AddressMap{m: make(map[string]addressMapEntryList)} +} + +// find returns the index of addr in the addressMapEntry slice, or -1 if not +// present. +func (l addressMapEntryList) find(addr Address) int { + if len(l) == 0 { + return -1 + } + for i, entry := range l { + if entry.addr.ServerName == addr.ServerName && + entry.addr.Attributes.Equal(addr.Attributes) { + return i + } + } + return -1 +} + +// Get returns the value for the address in the map, if present. +func (a *AddressMap) Get(addr Address) (value interface{}, ok bool) { + entryList := a.m[addr.Addr] + if entry := entryList.find(addr); entry != -1 { + return entryList[entry].value, true + } + return nil, false +} + +// Set updates or adds the value to the address in the map. +func (a *AddressMap) Set(addr Address, value interface{}) { + entryList := a.m[addr.Addr] + if entry := entryList.find(addr); entry != -1 { + a.m[addr.Addr][entry].value = value + return + } + a.m[addr.Addr] = append(a.m[addr.Addr], &addressMapEntry{addr: addr, value: value}) +} + +// Delete removes addr from the map. +func (a *AddressMap) Delete(addr Address) { + entryList := a.m[addr.Addr] + entry := entryList.find(addr) + if entry == -1 { + return + } + if len(entryList) == 1 { + entryList = nil + } else { + copy(entryList[entry:], entryList[entry+1:]) + entryList = entryList[:len(entryList)-1] + } + a.m[addr.Addr] = entryList +} + +// Len returns the number of entries in the map. +func (a *AddressMap) Len() int { + ret := 0 + for _, entryList := range a.m { + ret += len(entryList) + } + return ret +} + +// Keys returns a slice of all current map keys. +func (a *AddressMap) Keys() []Address { + ret := make([]Address, 0, a.Len()) + for _, entryList := range a.m { + for _, entry := range entryList { + ret = append(ret, entry.addr) + } + } + return ret +} diff --git a/vendor/google.golang.org/grpc/resolver/resolver.go b/vendor/google.golang.org/grpc/resolver/resolver.go index 6a9d234a5..ca2e35a35 100644 --- a/vendor/google.golang.org/grpc/resolver/resolver.go +++ b/vendor/google.golang.org/grpc/resolver/resolver.go @@ -23,9 +23,11 @@ package resolver import ( "context" "net" + "net/url" "google.golang.org/grpc/attributes" "google.golang.org/grpc/credentials" + "google.golang.org/grpc/internal/pretty" "google.golang.org/grpc/serviceconfig" ) @@ -116,9 +118,14 @@ type Address struct { ServerName string // Attributes contains arbitrary data about this address intended for - // consumption by the load balancing policy. + // consumption by the SubConn. Attributes *attributes.Attributes + // BalancerAttributes contains arbitrary data about this address intended + // for consumption by the LB policy. These attribes do not affect SubConn + // creation, connection establishment, handshaking, etc. + BalancerAttributes *attributes.Attributes + // Type is the type of this address. // // Deprecated: use Attributes instead. @@ -131,6 +138,20 @@ type Address struct { Metadata interface{} } +// Equal returns whether a and o are identical. Metadata is compared directly, +// not with any recursive introspection. 
+func (a Address) Equal(o Address) bool { + return a.Addr == o.Addr && a.ServerName == o.ServerName && + a.Attributes.Equal(o.Attributes) && + a.BalancerAttributes.Equal(o.BalancerAttributes) && + a.Type == o.Type && a.Metadata == o.Metadata +} + +// String returns JSON formatted string representation of the address. +func (a Address) String() string { + return pretty.ToJSON(a) +} + // BuildOptions includes additional information for the builder to create // the resolver. type BuildOptions struct { @@ -204,25 +225,36 @@ type ClientConn interface { // Target represents a target for gRPC, as specified in: // https://github.com/grpc/grpc/blob/master/doc/naming.md. -// It is parsed from the target string that gets passed into Dial or DialContext by the user. And -// grpc passes it to the resolver and the balancer. +// It is parsed from the target string that gets passed into Dial or DialContext +// by the user. And gRPC passes it to the resolver and the balancer. // -// If the target follows the naming spec, and the parsed scheme is registered with grpc, we will -// parse the target string according to the spec. e.g. "dns://some_authority/foo.bar" will be parsed -// into &Target{Scheme: "dns", Authority: "some_authority", Endpoint: "foo.bar"} +// If the target follows the naming spec, and the parsed scheme is registered +// with gRPC, we will parse the target string according to the spec. If the +// target does not contain a scheme or if the parsed scheme is not registered +// (i.e. no corresponding resolver available to resolve the endpoint), we will +// apply the default scheme, and will attempt to reparse it. // -// If the target does not contain a scheme, we will apply the default scheme, and set the Target to -// be the full target string. e.g. "foo.bar" will be parsed into -// &Target{Scheme: resolver.GetDefaultScheme(), Endpoint: "foo.bar"}. +// Examples: // -// If the parsed scheme is not registered (i.e. no corresponding resolver available to resolve the -// endpoint), we set the Scheme to be the default scheme, and set the Endpoint to be the full target -// string. e.g. target string "unknown_scheme://authority/endpoint" will be parsed into -// &Target{Scheme: resolver.GetDefaultScheme(), Endpoint: "unknown_scheme://authority/endpoint"}. +// - "dns://some_authority/foo.bar" +// Target{Scheme: "dns", Authority: "some_authority", Endpoint: "foo.bar"} +// - "foo.bar" +// Target{Scheme: resolver.GetDefaultScheme(), Endpoint: "foo.bar"} +// - "unknown_scheme://authority/endpoint" +// Target{Scheme: resolver.GetDefaultScheme(), Endpoint: "unknown_scheme://authority/endpoint"} type Target struct { - Scheme string + // Deprecated: use URL.Scheme instead. + Scheme string + // Deprecated: use URL.Host instead. Authority string - Endpoint string + // Deprecated: use URL.Path or URL.Opaque instead. The latter is set when + // the former is empty. + Endpoint string + // URL contains the parsed dial target with an optional default scheme added + // to it if the original dial target contained no scheme or contained an + // unregistered scheme. Any query params specified in the original dial + // target can be accessed from here. + URL url.URL } // Builder creates a resolver that will be used to watch name resolution updates. 
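The new resolver/map.go above adds resolver.AddressMap, which keys entries on Addr, ServerName and Attributes and ignores BalancerAttributes, Type and Metadata. A minimal usage sketch, not taken from the patch:

```go
package main

import (
	"fmt"

	"google.golang.org/grpc/resolver"
)

func main() {
	m := resolver.NewAddressMap()

	a := resolver.Address{Addr: "10.0.0.1:443", ServerName: "backend-a"}
	b := resolver.Address{Addr: "10.0.0.1:443", ServerName: "backend-b"}

	// Same Addr but different ServerName, so the two addresses get separate entries.
	m.Set(a, "value-a")
	m.Set(b, "value-b")

	if v, ok := m.Get(a); ok {
		fmt.Println(v) // value-a
	}
	fmt.Println(m.Len()) // 2

	m.Delete(b)
	fmt.Println(m.Len()) // 1
}
```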
diff --git a/vendor/google.golang.org/grpc/resolver_conn_wrapper.go b/vendor/google.golang.org/grpc/resolver_conn_wrapper.go index 2c47cd54f..05a9d4e0b 100644 --- a/vendor/google.golang.org/grpc/resolver_conn_wrapper.go +++ b/vendor/google.golang.org/grpc/resolver_conn_wrapper.go @@ -19,7 +19,6 @@ package grpc import ( - "fmt" "strings" "sync" @@ -27,6 +26,7 @@ import ( "google.golang.org/grpc/credentials" "google.golang.org/grpc/internal/channelz" "google.golang.org/grpc/internal/grpcsync" + "google.golang.org/grpc/internal/pretty" "google.golang.org/grpc/resolver" "google.golang.org/grpc/serviceconfig" ) @@ -97,10 +97,7 @@ func (ccr *ccResolverWrapper) UpdateState(s resolver.State) error { if ccr.done.HasFired() { return nil } - channelz.Infof(logger, ccr.cc.channelzID, "ccResolverWrapper: sending update to cc: %v", s) - if channelz.IsOn() { - ccr.addChannelzTraceEvent(s) - } + ccr.addChannelzTraceEvent(s) ccr.curState = s if err := ccr.cc.updateResolverState(ccr.curState, nil); err == balancer.ErrBadResolverState { return balancer.ErrBadResolverState @@ -125,10 +122,7 @@ func (ccr *ccResolverWrapper) NewAddress(addrs []resolver.Address) { if ccr.done.HasFired() { return } - channelz.Infof(logger, ccr.cc.channelzID, "ccResolverWrapper: sending new addresses to cc: %v", addrs) - if channelz.IsOn() { - ccr.addChannelzTraceEvent(resolver.State{Addresses: addrs, ServiceConfig: ccr.curState.ServiceConfig}) - } + ccr.addChannelzTraceEvent(resolver.State{Addresses: addrs, ServiceConfig: ccr.curState.ServiceConfig}) ccr.curState.Addresses = addrs ccr.cc.updateResolverState(ccr.curState, nil) } @@ -141,7 +135,7 @@ func (ccr *ccResolverWrapper) NewServiceConfig(sc string) { if ccr.done.HasFired() { return } - channelz.Infof(logger, ccr.cc.channelzID, "ccResolverWrapper: got new service config: %v", sc) + channelz.Infof(logger, ccr.cc.channelzID, "ccResolverWrapper: got new service config: %s", sc) if ccr.cc.dopts.disableServiceConfig { channelz.Info(logger, ccr.cc.channelzID, "Service config lookups disabled; ignoring config") return @@ -151,9 +145,7 @@ func (ccr *ccResolverWrapper) NewServiceConfig(sc string) { channelz.Warningf(logger, ccr.cc.channelzID, "ccResolverWrapper: error parsing service config: %v", scpr.Err) return } - if channelz.IsOn() { - ccr.addChannelzTraceEvent(resolver.State{Addresses: ccr.curState.Addresses, ServiceConfig: scpr}) - } + ccr.addChannelzTraceEvent(resolver.State{Addresses: ccr.curState.Addresses, ServiceConfig: scpr}) ccr.curState.ServiceConfig = scpr ccr.cc.updateResolverState(ccr.curState, nil) } @@ -180,8 +172,5 @@ func (ccr *ccResolverWrapper) addChannelzTraceEvent(s resolver.State) { } else if len(ccr.curState.Addresses) == 0 && len(s.Addresses) > 0 { updates = append(updates, "resolver returned new addresses") } - channelz.AddTraceEvent(logger, ccr.cc.channelzID, 0, &channelz.TraceEventDesc{ - Desc: fmt.Sprintf("Resolver state updated: %+v (%v)", s, strings.Join(updates, "; ")), - Severity: channelz.CtInfo, - }) + channelz.Infof(logger, ccr.cc.channelzID, "Resolver state updated: %s (%v)", pretty.ToJSON(s), strings.Join(updates, "; ")) } diff --git a/vendor/google.golang.org/grpc/rpc_util.go b/vendor/google.golang.org/grpc/rpc_util.go index 87987a2e6..5d407b004 100644 --- a/vendor/google.golang.org/grpc/rpc_util.go +++ b/vendor/google.golang.org/grpc/rpc_util.go @@ -712,13 +712,11 @@ func recvAndDecompress(p *parser, s *transport.Stream, dc Decompressor, maxRecei if err != nil { return nil, status.Errorf(codes.Internal, "grpc: failed to decompress the 
received message %v", err) } - } else { - size = len(d) - } - if size > maxReceiveMessageSize { - // TODO: Revisit the error code. Currently keep it consistent with java - // implementation. - return nil, status.Errorf(codes.ResourceExhausted, "grpc: received message larger than max (%d vs. %d)", size, maxReceiveMessageSize) + if size > maxReceiveMessageSize { + // TODO: Revisit the error code. Currently keep it consistent with java + // implementation. + return nil, status.Errorf(codes.ResourceExhausted, "grpc: received message after decompression larger than max (%d vs. %d)", size, maxReceiveMessageSize) + } } return d, nil } diff --git a/vendor/google.golang.org/grpc/server.go b/vendor/google.golang.org/grpc/server.go index 557f29559..65de84b30 100644 --- a/vendor/google.golang.org/grpc/server.go +++ b/vendor/google.golang.org/grpc/server.go @@ -134,7 +134,7 @@ type Server struct { channelzRemoveOnce sync.Once serveWG sync.WaitGroup // counts active Serve goroutines for GracefulStop - channelzID int64 // channelz unique identification number + channelzID *channelz.Identifier czData *channelzData serverWorkerChannels []chan *serverWorkerData @@ -584,9 +584,8 @@ func NewServer(opt ...ServerOption) *Server { s.initServerWorkers() } - if channelz.IsOn() { - s.channelzID = channelz.RegisterServer(&channelzServer{s}, "") - } + s.channelzID = channelz.RegisterServer(&channelzServer{s}, "") + channelz.Info(logger, s.channelzID, "Server created") return s } @@ -712,7 +711,7 @@ var ErrServerStopped = errors.New("grpc: the server has been stopped") type listenSocket struct { net.Listener - channelzID int64 + channelzID *channelz.Identifier } func (l *listenSocket) ChannelzMetric() *channelz.SocketInternalMetric { @@ -724,9 +723,8 @@ func (l *listenSocket) ChannelzMetric() *channelz.SocketInternalMetric { func (l *listenSocket) Close() error { err := l.Listener.Close() - if channelz.IsOn() { - channelz.RemoveEntry(l.channelzID) - } + channelz.RemoveEntry(l.channelzID) + channelz.Info(logger, l.channelzID, "ListenSocket deleted") return err } @@ -759,11 +757,6 @@ func (s *Server) Serve(lis net.Listener) error { ls := &listenSocket{Listener: lis} s.lis[ls] = true - if channelz.IsOn() { - ls.channelzID = channelz.RegisterListenSocket(ls, s.channelzID, lis.Addr().String()) - } - s.mu.Unlock() - defer func() { s.mu.Lock() if s.lis != nil && s.lis[ls] { @@ -773,8 +766,16 @@ func (s *Server) Serve(lis net.Listener) error { s.mu.Unlock() }() - var tempDelay time.Duration // how long to sleep on accept failure + var err error + ls.channelzID, err = channelz.RegisterListenSocket(ls, s.channelzID, lis.Addr().String()) + if err != nil { + s.mu.Unlock() + return err + } + s.mu.Unlock() + channelz.Info(logger, ls.channelzID, "ListenSocket created") + var tempDelay time.Duration // how long to sleep on accept failure for { rawConn, err := lis.Accept() if err != nil { @@ -885,13 +886,11 @@ func (s *Server) newHTTP2Transport(c net.Conn) transport.ServerTransport { // ErrConnDispatched means that the connection was dispatched away from // gRPC; those connections should be left open. if err != credentials.ErrConnDispatched { - c.Close() - } - // Don't log on ErrConnDispatched and io.EOF to prevent log spam. - if err != credentials.ErrConnDispatched { + // Don't log on ErrConnDispatched and io.EOF to prevent log spam. 
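The rpc_util.go change above scopes this check to the decompressed payload and reports it with a dedicated error message; the limit itself is the one set through the public options. A brief sketch, where `client` and `SomeRPC` are hypothetical placeholders for a generated stub:

```go
package main

import "google.golang.org/grpc"

func main() {
	// Server side: cap incoming message size at 4 MiB; per the hunk above this
	// limit is also enforced against the payload after decompression.
	srv := grpc.NewServer(grpc.MaxRecvMsgSize(4 * 1024 * 1024))
	defer srv.Stop()

	// Client side the cap is a per-call option:
	//   resp, err := client.SomeRPC(ctx, req, grpc.MaxCallRecvMsgSize(4*1024*1024))
}
```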
if err != io.EOF { channelz.Warning(logger, s.channelzID, "grpc: Server.Serve failed to create ServerTransport: ", err) } + c.Close() } return nil } @@ -1106,16 +1105,21 @@ func chainUnaryServerInterceptors(s *Server) { func chainUnaryInterceptors(interceptors []UnaryServerInterceptor) UnaryServerInterceptor { return func(ctx context.Context, req interface{}, info *UnaryServerInfo, handler UnaryHandler) (interface{}, error) { - var i int - var next UnaryHandler - next = func(ctx context.Context, req interface{}) (interface{}, error) { - if i == len(interceptors)-1 { - return interceptors[i](ctx, req, info, handler) + // the struct ensures the variables are allocated together, rather than separately, since we + // know they should be garbage collected together. This saves 1 allocation and decreases + // time/call by about 10% on the microbenchmark. + var state struct { + i int + next UnaryHandler + } + state.next = func(ctx context.Context, req interface{}) (interface{}, error) { + if state.i == len(interceptors)-1 { + return interceptors[state.i](ctx, req, info, handler) } - i++ - return interceptors[i-1](ctx, req, info, next) + state.i++ + return interceptors[state.i-1](ctx, req, info, state.next) } - return next(ctx, req) + return state.next(ctx, req) } } @@ -1280,9 +1284,10 @@ func (s *Server) processUnaryRPC(t transport.ServerTransport, stream *transport. if appErr != nil { appStatus, ok := status.FromError(appErr) if !ok { - // Convert appErr if it is not a grpc status error. - appErr = status.Error(codes.Unknown, appErr.Error()) - appStatus, _ = status.FromError(appErr) + // Convert non-status application error to a status error with code + // Unknown, but handle context errors specifically. + appStatus = status.FromContextError(appErr) + appErr = appStatus.Err() } if trInfo != nil { trInfo.tr.LazyLog(stringer(appStatus.Message()), true) @@ -1391,16 +1396,21 @@ func chainStreamServerInterceptors(s *Server) { func chainStreamInterceptors(interceptors []StreamServerInterceptor) StreamServerInterceptor { return func(srv interface{}, ss ServerStream, info *StreamServerInfo, handler StreamHandler) error { - var i int - var next StreamHandler - next = func(srv interface{}, ss ServerStream) error { - if i == len(interceptors)-1 { - return interceptors[i](srv, ss, info, handler) + // the struct ensures the variables are allocated together, rather than separately, since we + // know they should be garbage collected together. This saves 1 allocation and decreases + // time/call by about 10% on the microbenchmark. + var state struct { + i int + next StreamHandler + } + state.next = func(srv interface{}, ss ServerStream) error { + if state.i == len(interceptors)-1 { + return interceptors[state.i](srv, ss, info, handler) } - i++ - return interceptors[i-1](srv, ss, info, next) + state.i++ + return interceptors[state.i-1](srv, ss, info, state.next) } - return next(srv, ss) + return state.next(srv, ss) } } @@ -1541,7 +1551,9 @@ func (s *Server) processStreamingRPC(t transport.ServerTransport, stream *transp if appErr != nil { appStatus, ok := status.FromError(appErr) if !ok { - appStatus = status.New(codes.Unknown, appErr.Error()) + // Convert non-status application error to a status error with code + // Unknown, but handle context errors specifically. 
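The rewritten chainUnaryInterceptors above keeps the chain state in a single struct to save an allocation per call; the chain is built whenever more than one unary interceptor is registered, for example via grpc.ChainUnaryInterceptor. A minimal sketch with two illustrative interceptors:

```go
package main

import (
	"context"
	"log"

	"google.golang.org/grpc"
)

// Two unary interceptors; the server folds them into one chain in registration order.
func logCalls(ctx context.Context, req interface{}, info *grpc.UnaryServerInfo, handler grpc.UnaryHandler) (interface{}, error) {
	log.Printf("-> %s", info.FullMethod)
	return handler(ctx, req)
}

func tagCalls(ctx context.Context, req interface{}, info *grpc.UnaryServerInfo, handler grpc.UnaryHandler) (interface{}, error) {
	log.Println("   second interceptor in the chain")
	return handler(ctx, req)
}

func main() {
	srv := grpc.NewServer(grpc.ChainUnaryInterceptor(logCalls, tagCalls))
	defer srv.Stop()
	// Register services and call srv.Serve(lis) as usual.
}
```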
+ appStatus = status.FromContextError(appErr) appErr = appStatus.Err() } if trInfo != nil { @@ -1698,11 +1710,7 @@ func (s *Server) Stop() { s.done.Fire() }() - s.channelzRemoveOnce.Do(func() { - if channelz.IsOn() { - channelz.RemoveEntry(s.channelzID) - } - }) + s.channelzRemoveOnce.Do(func() { channelz.RemoveEntry(s.channelzID) }) s.mu.Lock() listeners := s.lis @@ -1740,11 +1748,7 @@ func (s *Server) GracefulStop() { s.quit.Fire() defer s.done.Fire() - s.channelzRemoveOnce.Do(func() { - if channelz.IsOn() { - channelz.RemoveEntry(s.channelzID) - } - }) + s.channelzRemoveOnce.Do(func() { channelz.RemoveEntry(s.channelzID) }) s.mu.Lock() if s.conns == nil { s.mu.Unlock() @@ -1797,12 +1801,26 @@ func (s *Server) getCodec(contentSubtype string) baseCodec { return codec } -// SetHeader sets the header metadata. -// When called multiple times, all the provided metadata will be merged. -// All the metadata will be sent out when one of the following happens: -// - grpc.SendHeader() is called; -// - The first response is sent out; -// - An RPC status is sent out (error or success). +// SetHeader sets the header metadata to be sent from the server to the client. +// The context provided must be the context passed to the server's handler. +// +// Streaming RPCs should prefer the SetHeader method of the ServerStream. +// +// When called multiple times, all the provided metadata will be merged. All +// the metadata will be sent out when one of the following happens: +// +// - grpc.SendHeader is called, or for streaming handlers, stream.SendHeader. +// - The first response message is sent. For unary handlers, this occurs when +// the handler returns; for streaming handlers, this can happen when stream's +// SendMsg method is called. +// - An RPC status is sent out (error or success). This occurs when the handler +// returns. +// +// SetHeader will fail if called after any of the events above. +// +// The error returned is compatible with the status package. However, the +// status code will often not match the RPC status as seen by the client +// application, and therefore, should not be relied upon for this purpose. func SetHeader(ctx context.Context, md metadata.MD) error { if md.Len() == 0 { return nil @@ -1814,8 +1832,14 @@ func SetHeader(ctx context.Context, md metadata.MD) error { return stream.SetHeader(md) } -// SendHeader sends header metadata. It may be called at most once. -// The provided md and headers set by SetHeader() will be sent. +// SendHeader sends header metadata. It may be called at most once, and may not +// be called after any event that causes headers to be sent (see SetHeader for +// a complete list). The provided md and headers set by SetHeader() will be +// sent. +// +// The error returned is compatible with the status package. However, the +// status code will often not match the RPC status as seen by the client +// application, and therefore, should not be relied upon for this purpose. func SendHeader(ctx context.Context, md metadata.MD) error { stream := ServerTransportStreamFromContext(ctx) if stream == nil { @@ -1829,6 +1853,10 @@ func SendHeader(ctx context.Context, md metadata.MD) error { // SetTrailer sets the trailer metadata that will be sent when an RPC returns. // When called more than once, all the provided metadata will be merged. +// +// The error returned is compatible with the status package. However, the +// status code will often not match the RPC status as seen by the client +// application, and therefore, should not be relied upon for this purpose. 
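The expanded SetHeader and SendHeader documentation above spells out when staged headers are flushed. A minimal handler sketch; pingServer and Ping are hypothetical, and emptypb.Empty merely stands in for generated request and response types:

```go
package main

import (
	"context"

	"google.golang.org/grpc"
	"google.golang.org/grpc/metadata"
	"google.golang.org/protobuf/types/known/emptypb"
)

type pingServer struct{}

func (s *pingServer) Ping(ctx context.Context, _ *emptypb.Empty) (*emptypb.Empty, error) {
	// Stage a header; it is merged with anything later passed to SendHeader.
	if err := grpc.SetHeader(ctx, metadata.Pairs("x-handler", "ping")); err != nil {
		return nil, err
	}
	// Flush headers explicitly; after this point further SetHeader calls fail.
	if err := grpc.SendHeader(ctx, metadata.Pairs("x-flushed", "early")); err != nil {
		return nil, err
	}
	// Long-running work happens here; headers are already on the wire.
	return &emptypb.Empty{}, nil
}

func main() {}
```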
func SetTrailer(ctx context.Context, md metadata.MD) error { if md.Len() == 0 { return nil diff --git a/vendor/google.golang.org/grpc/service_config.go b/vendor/google.golang.org/grpc/service_config.go index 22c4240cf..b01c548bb 100644 --- a/vendor/google.golang.org/grpc/service_config.go +++ b/vendor/google.golang.org/grpc/service_config.go @@ -218,7 +218,7 @@ type jsonSC struct { } func init() { - internal.ParseServiceConfigForTesting = parseServiceConfig + internal.ParseServiceConfig = parseServiceConfig } func parseServiceConfig(js string) *serviceconfig.ParseResult { if len(js) == 0 { @@ -381,6 +381,9 @@ func init() { // // If any of them is NOT *ServiceConfig, return false. func equalServiceConfig(a, b serviceconfig.Config) bool { + if a == nil && b == nil { + return true + } aa, ok := a.(*ServiceConfig) if !ok { return false diff --git a/vendor/google.golang.org/grpc/status/status.go b/vendor/google.golang.org/grpc/status/status.go index 54d187186..6d163b6e3 100644 --- a/vendor/google.golang.org/grpc/status/status.go +++ b/vendor/google.golang.org/grpc/status/status.go @@ -29,6 +29,7 @@ package status import ( "context" + "errors" "fmt" spb "google.golang.org/genproto/googleapis/rpc/status" @@ -73,11 +74,16 @@ func FromProto(s *spb.Status) *Status { return status.FromProto(s) } -// FromError returns a Status representing err if it was produced by this -// package or has a method `GRPCStatus() *Status`. -// If err is nil, a Status is returned with codes.OK and no message. -// Otherwise, ok is false and a Status is returned with codes.Unknown and -// the original error message. +// FromError returns a Status representation of err. +// +// - If err was produced by this package or implements the method `GRPCStatus() +// *Status`, the appropriate Status is returned. +// +// - If err is nil, a Status is returned with codes.OK and no message. +// +// - Otherwise, err is an error not compatible with this package. In this +// case, a Status is returned with codes.Unknown and err's Error() message, +// and ok is false. func FromError(err error) (s *Status, ok bool) { if err == nil { return nil, true @@ -112,18 +118,18 @@ func Code(err error) codes.Code { return codes.Unknown } -// FromContextError converts a context error into a Status. It returns a -// Status with codes.OK if err is nil, or a Status with codes.Unknown if err is -// non-nil and not a context error. +// FromContextError converts a context error or wrapped context error into a +// Status. It returns a Status with codes.OK if err is nil, or a Status with +// codes.Unknown if err is non-nil and not a context error. 
func FromContextError(err error) *Status { - switch err { - case nil: + if err == nil { return nil - case context.DeadlineExceeded: + } + if errors.Is(err, context.DeadlineExceeded) { return New(codes.DeadlineExceeded, err.Error()) - case context.Canceled: + } + if errors.Is(err, context.Canceled) { return New(codes.Canceled, err.Error()) - default: - return New(codes.Unknown, err.Error()) } + return New(codes.Unknown, err.Error()) } diff --git a/vendor/google.golang.org/grpc/stream.go b/vendor/google.golang.org/grpc/stream.go index 625d47b34..236fc17ec 100644 --- a/vendor/google.golang.org/grpc/stream.go +++ b/vendor/google.golang.org/grpc/stream.go @@ -36,6 +36,7 @@ import ( "google.golang.org/grpc/internal/channelz" "google.golang.org/grpc/internal/grpcrand" "google.golang.org/grpc/internal/grpcutil" + imetadata "google.golang.org/grpc/internal/metadata" iresolver "google.golang.org/grpc/internal/resolver" "google.golang.org/grpc/internal/serviceconfig" "google.golang.org/grpc/internal/transport" @@ -46,10 +47,12 @@ import ( ) // StreamHandler defines the handler called by gRPC server to complete the -// execution of a streaming RPC. If a StreamHandler returns an error, it -// should be produced by the status package, or else gRPC will use -// codes.Unknown as the status code and err.Error() as the status message -// of the RPC. +// execution of a streaming RPC. +// +// If a StreamHandler returns an error, it should either be produced by the +// status package, or be one of the context errors. Otherwise, gRPC will use +// codes.Unknown as the status code and err.Error() as the status message of the +// RPC. type StreamHandler func(srv interface{}, stream ServerStream) error // StreamDesc represents a streaming RPC service's method specification. Used @@ -164,6 +167,11 @@ func NewClientStream(ctx context.Context, desc *StreamDesc, cc *ClientConn, meth } func newClientStream(ctx context.Context, desc *StreamDesc, cc *ClientConn, method string, opts ...CallOption) (_ ClientStream, err error) { + if md, _, ok := metadata.FromOutgoingContextRaw(ctx); ok { + if err := imetadata.Validate(md); err != nil { + return nil, status.Error(codes.Internal, err.Error()) + } + } if channelz.IsOn() { cc.incrCallsStarted() defer func() { @@ -295,14 +303,28 @@ func newClientStreamWithParams(ctx context.Context, desc *StreamDesc, cc *Client } cs.binlog = binarylog.GetMethodLogger(method) - if err := cs.newAttemptLocked(false /* isTransparent */); err != nil { + cs.attempt, err = cs.newAttemptLocked(false /* isTransparent */) + if err != nil { cs.finish(err) return nil, err } - op := func(a *csAttempt) error { return a.newStream() } + // Pick the transport to use and create a new stream on the transport. + // Assign cs.attempt upon success. + op := func(a *csAttempt) error { + if err := a.getTransport(); err != nil { + return err + } + if err := a.newStream(); err != nil { + return err + } + // Because this operation is always called either here (while creating + // the clientStream) or by the retry code while locked when replaying + // the operation, it is safe to access cs.attempt directly. + cs.attempt = a + return nil + } if err := cs.withRetry(op, func() { cs.bufferForRetryLocked(0, op) }); err != nil { - cs.finish(err) return nil, err } @@ -341,9 +363,15 @@ func newClientStreamWithParams(ctx context.Context, desc *StreamDesc, cc *Client return cs, nil } -// newAttemptLocked creates a new attempt with a transport. -// If it succeeds, then it replaces clientStream's attempt with this new attempt. 
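The status.FromContextError rewrite above switches to errors.Is, so wrapped context errors now map to the corresponding status codes instead of codes.Unknown. A minimal sketch:

```go
package main

import (
	"context"
	"fmt"
	"time"

	"google.golang.org/grpc/codes"
	"google.golang.org/grpc/status"
)

func main() {
	ctx, cancel := context.WithTimeout(context.Background(), time.Millisecond)
	defer cancel()
	<-ctx.Done() // wait for the deadline to expire

	// Wrapping the context error no longer hides it from FromContextError.
	wrapped := fmt.Errorf("query backend: %w", ctx.Err())

	st := status.FromContextError(wrapped)
	fmt.Println(st.Code() == codes.DeadlineExceeded) // true; previously codes.Unknown
}
```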
-func (cs *clientStream) newAttemptLocked(isTransparent bool) (retErr error) { +// newAttemptLocked creates a new csAttempt without a transport or stream. +func (cs *clientStream) newAttemptLocked(isTransparent bool) (*csAttempt, error) { + if err := cs.ctx.Err(); err != nil { + return nil, toRPCErr(err) + } + if err := cs.cc.ctx.Err(); err != nil { + return nil, ErrClientConnClosing + } + ctx := newContextWithRPCInfo(cs.ctx, cs.callInfo.failFast, cs.callInfo.codec, cs.cp, cs.comp) method := cs.callHdr.Method sh := cs.cc.dopts.copts.StatsHandler @@ -377,44 +405,39 @@ func (cs *clientStream) newAttemptLocked(isTransparent bool) (retErr error) { ctx = trace.NewContext(ctx, trInfo.tr) } - newAttempt := &csAttempt{ + if cs.cc.parsedTarget.Scheme == "xds" { + // Add extra metadata (metadata that will be added by transport) to context + // so the balancer can see them. + ctx = grpcutil.WithExtraMetadata(ctx, metadata.Pairs( + "content-type", grpcutil.ContentType(cs.callHdr.ContentSubtype), + )) + } + + return &csAttempt{ ctx: ctx, beginTime: beginTime, cs: cs, dc: cs.cc.dopts.dc, statsHandler: sh, trInfo: trInfo, - } - defer func() { - if retErr != nil { - // This attempt is not set in the clientStream, so it's finish won't - // be called. Call it here for stats and trace in case they are not - // nil. - newAttempt.finish(retErr) - } - }() + }, nil +} - if err := ctx.Err(); err != nil { - return toRPCErr(err) - } +func (a *csAttempt) getTransport() error { + cs := a.cs - if cs.cc.parsedTarget.Scheme == "xds" { - // Add extra metadata (metadata that will be added by transport) to context - // so the balancer can see them. - ctx = grpcutil.WithExtraMetadata(ctx, metadata.Pairs( - "content-type", grpcutil.ContentType(cs.callHdr.ContentSubtype), - )) - } - t, done, err := cs.cc.getTransport(ctx, cs.callInfo.failFast, cs.callHdr.Method) + var err error + a.t, a.done, err = cs.cc.getTransport(a.ctx, cs.callInfo.failFast, cs.callHdr.Method) if err != nil { + if de, ok := err.(dropError); ok { + err = de.error + a.drop = true + } return err } - if trInfo != nil { - trInfo.firstLine.SetRemoteAddr(t.RemoteAddr()) + if a.trInfo != nil { + a.trInfo.firstLine.SetRemoteAddr(a.t.RemoteAddr()) } - newAttempt.t = t - newAttempt.done = done - cs.attempt = newAttempt return nil } @@ -423,12 +446,21 @@ func (a *csAttempt) newStream() error { cs.callHdr.PreviousAttempts = cs.numRetries s, err := a.t.NewStream(a.ctx, cs.callHdr) if err != nil { - // Return without converting to an RPC error so retry code can - // inspect. - return err + nse, ok := err.(*transport.NewStreamError) + if !ok { + // Unexpected. + return err + } + + if nse.AllowTransparentRetry { + a.allowTransparentRetry = true + } + + // Unwrap and convert error. + return toRPCErr(nse.Err) } - cs.attempt.s = s - cs.attempt.p = &parser{r: s} + a.s = s + a.p = &parser{r: s} return nil } @@ -454,7 +486,7 @@ type clientStream struct { retryThrottler *retryThrottler // The throttler active when the RPC began. - binlog *binarylog.MethodLogger // Binary logger, can be nil. + binlog binarylog.MethodLogger // Binary logger, can be nil. // serverHeaderBinlogged is a boolean for whether server header has been // logged. Server header will be logged when the first time one of those // happens: stream.Header(), stream.Recv(). 
@@ -506,6 +538,11 @@ type csAttempt struct { statsHandler stats.Handler beginTime time.Time + + // set for newStream errors that may be transparently retried + allowTransparentRetry bool + // set for pick errors that are returned as a status + drop bool } func (cs *clientStream) commitAttemptLocked() { @@ -525,41 +562,21 @@ func (cs *clientStream) commitAttempt() { // shouldRetry returns nil if the RPC should be retried; otherwise it returns // the error that should be returned by the operation. If the RPC should be // retried, the bool indicates whether it is being retried transparently. -func (cs *clientStream) shouldRetry(err error) (bool, error) { - if cs.attempt.s == nil { - // Error from NewClientStream. - nse, ok := err.(*transport.NewStreamError) - if !ok { - // Unexpected, but assume no I/O was performed and the RPC is not - // fatal, so retry indefinitely. - return true, nil - } - - // Unwrap and convert error. - err = toRPCErr(nse.Err) - - // Never retry DoNotRetry errors, which indicate the RPC should not be - // retried due to max header list size violation, etc. - if nse.DoNotRetry { - return false, err - } +func (a *csAttempt) shouldRetry(err error) (bool, error) { + cs := a.cs - // In the event of a non-IO operation error from NewStream, we never - // attempted to write anything to the wire, so we can retry - // indefinitely. - if !nse.DoNotTransparentRetry { - return true, nil - } - } - if cs.finished || cs.committed { - // RPC is finished or committed; cannot retry. + if cs.finished || cs.committed || a.drop { + // RPC is finished or committed or was dropped by the picker; cannot retry. return false, err } + if a.s == nil && a.allowTransparentRetry { + return true, nil + } // Wait for the trailers. unprocessed := false - if cs.attempt.s != nil { - <-cs.attempt.s.Done() - unprocessed = cs.attempt.s.Unprocessed() + if a.s != nil { + <-a.s.Done() + unprocessed = a.s.Unprocessed() } if cs.firstAttempt && unprocessed { // First attempt, stream unprocessed: transparently retry. @@ -571,14 +588,14 @@ func (cs *clientStream) shouldRetry(err error) (bool, error) { pushback := 0 hasPushback := false - if cs.attempt.s != nil { - if !cs.attempt.s.TrailersOnly() { + if a.s != nil { + if !a.s.TrailersOnly() { return false, err } // TODO(retry): Move down if the spec changes to not check server pushback // before considering this a failure for throttling. - sps := cs.attempt.s.Trailer()["grpc-retry-pushback-ms"] + sps := a.s.Trailer()["grpc-retry-pushback-ms"] if len(sps) == 1 { var e error if pushback, e = strconv.Atoi(sps[0]); e != nil || pushback < 0 { @@ -595,10 +612,10 @@ func (cs *clientStream) shouldRetry(err error) (bool, error) { } var code codes.Code - if cs.attempt.s != nil { - code = cs.attempt.s.Status().Code() + if a.s != nil { + code = a.s.Status().Code() } else { - code = status.Convert(err).Code() + code = status.Code(err) } rp := cs.methodConfig.RetryPolicy @@ -643,19 +660,24 @@ func (cs *clientStream) shouldRetry(err error) (bool, error) { } // Returns nil if a retry was performed and succeeded; error otherwise. 
-func (cs *clientStream) retryLocked(lastErr error) error { +func (cs *clientStream) retryLocked(attempt *csAttempt, lastErr error) error { for { - cs.attempt.finish(toRPCErr(lastErr)) - isTransparent, err := cs.shouldRetry(lastErr) + attempt.finish(toRPCErr(lastErr)) + isTransparent, err := attempt.shouldRetry(lastErr) if err != nil { cs.commitAttemptLocked() return err } cs.firstAttempt = false - if err := cs.newAttemptLocked(isTransparent); err != nil { + attempt, err = cs.newAttemptLocked(isTransparent) + if err != nil { + // Only returns error if the clientconn is closed or the context of + // the stream is canceled. return err } - if lastErr = cs.replayBufferLocked(); lastErr == nil { + // Note that the first op in the replay buffer always sets cs.attempt + // if it is able to pick a transport and create a stream. + if lastErr = cs.replayBufferLocked(attempt); lastErr == nil { return nil } } @@ -665,7 +687,10 @@ func (cs *clientStream) Context() context.Context { cs.commitAttempt() // No need to lock before using attempt, since we know it is committed and // cannot change. - return cs.attempt.s.Context() + if cs.attempt.s != nil { + return cs.attempt.s.Context() + } + return cs.ctx } func (cs *clientStream) withRetry(op func(a *csAttempt) error, onSuccess func()) error { @@ -695,7 +720,7 @@ func (cs *clientStream) withRetry(op func(a *csAttempt) error, onSuccess func()) cs.mu.Unlock() return err } - if err := cs.retryLocked(err); err != nil { + if err := cs.retryLocked(a, err); err != nil { cs.mu.Unlock() return err } @@ -726,7 +751,7 @@ func (cs *clientStream) Header() (metadata.MD, error) { cs.binlog.Log(logEntry) cs.serverHeaderBinlogged = true } - return m, err + return m, nil } func (cs *clientStream) Trailer() metadata.MD { @@ -744,10 +769,9 @@ func (cs *clientStream) Trailer() metadata.MD { return cs.attempt.s.Trailer() } -func (cs *clientStream) replayBufferLocked() error { - a := cs.attempt +func (cs *clientStream) replayBufferLocked(attempt *csAttempt) error { for _, f := range cs.buffer { - if err := f(a); err != nil { + if err := f(attempt); err != nil { return err } } @@ -795,22 +819,17 @@ func (cs *clientStream) SendMsg(m interface{}) (err error) { if len(payload) > *cs.callInfo.maxSendMessageSize { return status.Errorf(codes.ResourceExhausted, "trying to send message larger than max (%d vs. %d)", len(payload), *cs.callInfo.maxSendMessageSize) } - msgBytes := data // Store the pointer before setting to nil. For binary logging. op := func(a *csAttempt) error { - err := a.sendMsg(m, hdr, payload, data) - // nil out the message and uncomp when replaying; they are only needed for - // stats which is disabled for subsequent attempts. - m, data = nil, nil - return err + return a.sendMsg(m, hdr, payload, data) } err = cs.withRetry(op, func() { cs.bufferForRetryLocked(len(hdr)+len(payload), op) }) if cs.binlog != nil && err == nil { cs.binlog.Log(&binarylog.ClientMessage{ OnClientSide: true, - Message: msgBytes, + Message: data, }) } - return + return err } func (cs *clientStream) RecvMsg(m interface{}) error { @@ -1362,8 +1381,10 @@ func (as *addrConnStream) finish(err error) { // ServerStream defines the server-side behavior of a streaming RPC. // -// All errors returned from ServerStream methods are compatible with the -// status package. +// Errors returned from ServerStream methods are compatible with the status +// package. 
However, the status code will often not match the RPC status as +// seen by the client application, and therefore, should not be relied upon for +// this purpose. type ServerStream interface { // SetHeader sets the header metadata. It may be called multiple times. // When call multiple times, all the provided metadata will be merged. @@ -1426,7 +1447,7 @@ type serverStream struct { statsHandler stats.Handler - binlog *binarylog.MethodLogger + binlog binarylog.MethodLogger // serverHeaderBinlogged indicates whether server header has been logged. It // will happen when one of the following two happens: stream.SendHeader(), // stream.Send(). @@ -1446,11 +1467,20 @@ func (ss *serverStream) SetHeader(md metadata.MD) error { if md.Len() == 0 { return nil } + err := imetadata.Validate(md) + if err != nil { + return status.Error(codes.Internal, err.Error()) + } return ss.s.SetHeader(md) } func (ss *serverStream) SendHeader(md metadata.MD) error { - err := ss.t.WriteHeader(ss.s, md) + err := imetadata.Validate(md) + if err != nil { + return status.Error(codes.Internal, err.Error()) + } + + err = ss.t.WriteHeader(ss.s, md) if ss.binlog != nil && !ss.serverHeaderBinlogged { h, _ := ss.s.Header() ss.binlog.Log(&binarylog.ServerHeader{ @@ -1465,6 +1495,9 @@ func (ss *serverStream) SetTrailer(md metadata.MD) { if md.Len() == 0 { return } + if err := imetadata.Validate(md); err != nil { + logger.Errorf("stream: failed to validate md when setting trailer, err: %v", err) + } ss.s.SetTrailer(md) } diff --git a/vendor/google.golang.org/grpc/version.go b/vendor/google.golang.org/grpc/version.go index 48594bc24..5bc03f9b3 100644 --- a/vendor/google.golang.org/grpc/version.go +++ b/vendor/google.golang.org/grpc/version.go @@ -19,4 +19,4 @@ package grpc // Version is the current grpc version. -const Version = "1.41.0" +const Version = "1.47.0" diff --git a/vendor/google.golang.org/grpc/vet.sh b/vendor/google.golang.org/grpc/vet.sh index d923187a7..ceb436c6c 100644 --- a/vendor/google.golang.org/grpc/vet.sh +++ b/vendor/google.golang.org/grpc/vet.sh @@ -107,7 +107,7 @@ for MOD_FILE in $(find . -name 'go.mod'); do go vet -all ./... | fail_on_output gofmt -s -d -l . 2>&1 | fail_on_output goimports -l . 2>&1 | not grep -vE "\.pb\.go" - golint ./... 2>&1 | not grep -vE "/testv3\.pb\.go:" + golint ./... 2>&1 | not grep -vE "/grpc_testing_not_regenerate/.*\.pb\.go:" go mod tidy git status --porcelain 2>&1 | fail_on_output || \ diff --git a/vendor/google.golang.org/protobuf/encoding/protowire/wire.go b/vendor/google.golang.org/protobuf/encoding/protowire/wire.go index a427f8b70..9c61112f5 100644 --- a/vendor/google.golang.org/protobuf/encoding/protowire/wire.go +++ b/vendor/google.golang.org/protobuf/encoding/protowire/wire.go @@ -21,10 +21,11 @@ import ( type Number int32 const ( - MinValidNumber Number = 1 - FirstReservedNumber Number = 19000 - LastReservedNumber Number = 19999 - MaxValidNumber Number = 1<<29 - 1 + MinValidNumber Number = 1 + FirstReservedNumber Number = 19000 + LastReservedNumber Number = 19999 + MaxValidNumber Number = 1<<29 - 1 + DefaultRecursionLimit = 10000 ) // IsValid reports whether the field number is semantically valid. @@ -55,6 +56,7 @@ const ( errCodeOverflow errCodeReserved errCodeEndGroup + errCodeRecursionDepth ) var ( @@ -112,6 +114,10 @@ func ConsumeField(b []byte) (Number, Type, int) { // When parsing a group, the length includes the end group marker and // the end group is verified to match the starting field number. 
func ConsumeFieldValue(num Number, typ Type, b []byte) (n int) { + return consumeFieldValueD(num, typ, b, DefaultRecursionLimit) +} + +func consumeFieldValueD(num Number, typ Type, b []byte, depth int) (n int) { switch typ { case VarintType: _, n = ConsumeVarint(b) @@ -126,6 +132,9 @@ func ConsumeFieldValue(num Number, typ Type, b []byte) (n int) { _, n = ConsumeBytes(b) return n case StartGroupType: + if depth < 0 { + return errCodeRecursionDepth + } n0 := len(b) for { num2, typ2, n := ConsumeTag(b) @@ -140,7 +149,7 @@ func ConsumeFieldValue(num Number, typ Type, b []byte) (n int) { return n0 - len(b) } - n = ConsumeFieldValue(num2, typ2, b) + n = consumeFieldValueD(num2, typ2, b, depth-1) if n < 0 { return n // forward error code } diff --git a/vendor/google.golang.org/protobuf/internal/encoding/text/decode.go b/vendor/google.golang.org/protobuf/internal/encoding/text/decode.go index eb10ea102..37803773f 100644 --- a/vendor/google.golang.org/protobuf/internal/encoding/text/decode.go +++ b/vendor/google.golang.org/protobuf/internal/encoding/text/decode.go @@ -381,7 +381,7 @@ func (d *Decoder) currentOpenKind() (Kind, byte) { case '[': return ListOpen, ']' } - panic(fmt.Sprintf("Decoder: openStack contains invalid byte %s", string(openCh))) + panic(fmt.Sprintf("Decoder: openStack contains invalid byte %c", openCh)) } func (d *Decoder) pushOpenStack(ch byte) { diff --git a/vendor/google.golang.org/protobuf/internal/errors/is_go112.go b/vendor/google.golang.org/protobuf/internal/errors/is_go112.go index f90e909b3..fbcd34920 100644 --- a/vendor/google.golang.org/protobuf/internal/errors/is_go112.go +++ b/vendor/google.golang.org/protobuf/internal/errors/is_go112.go @@ -2,6 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. +//go:build !go1.13 // +build !go1.13 package errors diff --git a/vendor/google.golang.org/protobuf/internal/errors/is_go113.go b/vendor/google.golang.org/protobuf/internal/errors/is_go113.go index dc05f4191..5e72f1cde 100644 --- a/vendor/google.golang.org/protobuf/internal/errors/is_go113.go +++ b/vendor/google.golang.org/protobuf/internal/errors/is_go113.go @@ -2,6 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. +//go:build go1.13 // +build go1.13 package errors diff --git a/vendor/google.golang.org/protobuf/internal/flags/proto_legacy_disable.go b/vendor/google.golang.org/protobuf/internal/flags/proto_legacy_disable.go index a72995f02..bda8e8cf3 100644 --- a/vendor/google.golang.org/protobuf/internal/flags/proto_legacy_disable.go +++ b/vendor/google.golang.org/protobuf/internal/flags/proto_legacy_disable.go @@ -2,6 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. +//go:build !protolegacy // +build !protolegacy package flags diff --git a/vendor/google.golang.org/protobuf/internal/flags/proto_legacy_enable.go b/vendor/google.golang.org/protobuf/internal/flags/proto_legacy_enable.go index 772e2f0e4..6d8d9bd6b 100644 --- a/vendor/google.golang.org/protobuf/internal/flags/proto_legacy_enable.go +++ b/vendor/google.golang.org/protobuf/internal/flags/proto_legacy_enable.go @@ -2,6 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. 
+//go:build protolegacy // +build protolegacy package flags diff --git a/vendor/google.golang.org/protobuf/internal/impl/codec_map_go111.go b/vendor/google.golang.org/protobuf/internal/impl/codec_map_go111.go index 2706bb67f..4b15493f2 100644 --- a/vendor/google.golang.org/protobuf/internal/impl/codec_map_go111.go +++ b/vendor/google.golang.org/protobuf/internal/impl/codec_map_go111.go @@ -2,6 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. +//go:build !go1.12 // +build !go1.12 package impl diff --git a/vendor/google.golang.org/protobuf/internal/impl/codec_map_go112.go b/vendor/google.golang.org/protobuf/internal/impl/codec_map_go112.go index 1533ef600..0b31b66ea 100644 --- a/vendor/google.golang.org/protobuf/internal/impl/codec_map_go112.go +++ b/vendor/google.golang.org/protobuf/internal/impl/codec_map_go112.go @@ -2,6 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. +//go:build go1.12 // +build go1.12 package impl diff --git a/vendor/google.golang.org/protobuf/internal/impl/codec_reflect.go b/vendor/google.golang.org/protobuf/internal/impl/codec_reflect.go index 90705e3ae..145c577bd 100644 --- a/vendor/google.golang.org/protobuf/internal/impl/codec_reflect.go +++ b/vendor/google.golang.org/protobuf/internal/impl/codec_reflect.go @@ -2,6 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. +//go:build purego || appengine // +build purego appengine package impl diff --git a/vendor/google.golang.org/protobuf/internal/impl/codec_unsafe.go b/vendor/google.golang.org/protobuf/internal/impl/codec_unsafe.go index e118af1e2..757642e23 100644 --- a/vendor/google.golang.org/protobuf/internal/impl/codec_unsafe.go +++ b/vendor/google.golang.org/protobuf/internal/impl/codec_unsafe.go @@ -2,6 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. 
+//go:build !purego && !appengine // +build !purego,!appengine package impl diff --git a/vendor/google.golang.org/protobuf/internal/impl/decode.go b/vendor/google.golang.org/protobuf/internal/impl/decode.go index 949dc49a6..c65b0325c 100644 --- a/vendor/google.golang.org/protobuf/internal/impl/decode.go +++ b/vendor/google.golang.org/protobuf/internal/impl/decode.go @@ -18,6 +18,7 @@ import ( ) var errDecode = errors.New("cannot parse invalid wire-format data") +var errRecursionDepth = errors.New("exceeded maximum recursion depth") type unmarshalOptions struct { flags protoiface.UnmarshalInputFlags @@ -25,6 +26,7 @@ type unmarshalOptions struct { FindExtensionByName(field protoreflect.FullName) (protoreflect.ExtensionType, error) FindExtensionByNumber(message protoreflect.FullName, field protoreflect.FieldNumber) (protoreflect.ExtensionType, error) } + depth int } func (o unmarshalOptions) Options() proto.UnmarshalOptions { @@ -44,6 +46,7 @@ func (o unmarshalOptions) IsDefault() bool { var lazyUnmarshalOptions = unmarshalOptions{ resolver: preg.GlobalTypes, + depth: protowire.DefaultRecursionLimit, } type unmarshalOutput struct { @@ -62,6 +65,7 @@ func (mi *MessageInfo) unmarshal(in piface.UnmarshalInput) (piface.UnmarshalOutp out, err := mi.unmarshalPointer(in.Buf, p, 0, unmarshalOptions{ flags: in.Flags, resolver: in.Resolver, + depth: in.Depth, }) var flags piface.UnmarshalOutputFlags if out.initialized { @@ -82,6 +86,10 @@ var errUnknown = errors.New("unknown") func (mi *MessageInfo) unmarshalPointer(b []byte, p pointer, groupTag protowire.Number, opts unmarshalOptions) (out unmarshalOutput, err error) { mi.init() + opts.depth-- + if opts.depth < 0 { + return out, errRecursionDepth + } if flags.ProtoLegacy && mi.isMessageSet { return unmarshalMessageSet(mi, b, p, opts) } diff --git a/vendor/google.golang.org/protobuf/internal/impl/pointer_reflect.go b/vendor/google.golang.org/protobuf/internal/impl/pointer_reflect.go index 9e3ed821e..4c491bdf4 100644 --- a/vendor/google.golang.org/protobuf/internal/impl/pointer_reflect.go +++ b/vendor/google.golang.org/protobuf/internal/impl/pointer_reflect.go @@ -2,6 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. +//go:build purego || appengine // +build purego appengine package impl diff --git a/vendor/google.golang.org/protobuf/internal/impl/pointer_unsafe.go b/vendor/google.golang.org/protobuf/internal/impl/pointer_unsafe.go index 9ecf23a85..ee0e0573e 100644 --- a/vendor/google.golang.org/protobuf/internal/impl/pointer_unsafe.go +++ b/vendor/google.golang.org/protobuf/internal/impl/pointer_unsafe.go @@ -2,6 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. +//go:build !purego && !appengine // +build !purego,!appengine package impl diff --git a/vendor/google.golang.org/protobuf/internal/strs/strings_pure.go b/vendor/google.golang.org/protobuf/internal/strs/strings_pure.go index 85e074c97..a1f6f3338 100644 --- a/vendor/google.golang.org/protobuf/internal/strs/strings_pure.go +++ b/vendor/google.golang.org/protobuf/internal/strs/strings_pure.go @@ -2,6 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. 
+//go:build purego || appengine
 // +build purego appengine
 
 package strs
diff --git a/vendor/google.golang.org/protobuf/internal/strs/strings_unsafe.go b/vendor/google.golang.org/protobuf/internal/strs/strings_unsafe.go
index 2160c7019..56a8a4ed3 100644
--- a/vendor/google.golang.org/protobuf/internal/strs/strings_unsafe.go
+++ b/vendor/google.golang.org/protobuf/internal/strs/strings_unsafe.go
@@ -2,6 +2,7 @@
 // Use of this source code is governed by a BSD-style
 // license that can be found in the LICENSE file.
 
+//go:build !purego && !appengine
 // +build !purego,!appengine
 
 package strs
diff --git a/vendor/google.golang.org/protobuf/internal/version/version.go b/vendor/google.golang.org/protobuf/internal/version/version.go
index 14e774fb2..3d40d5249 100644
--- a/vendor/google.golang.org/protobuf/internal/version/version.go
+++ b/vendor/google.golang.org/protobuf/internal/version/version.go
@@ -52,8 +52,8 @@ import (
 // 10. Send out the CL for review and submit it.
 const (
 	Major      = 1
-	Minor      = 27
-	Patch      = 1
+	Minor      = 28
+	Patch      = 0
 	PreRelease = ""
 )
 
diff --git a/vendor/google.golang.org/protobuf/proto/decode.go b/vendor/google.golang.org/protobuf/proto/decode.go
index 49f9b8c88..11bf7173b 100644
--- a/vendor/google.golang.org/protobuf/proto/decode.go
+++ b/vendor/google.golang.org/protobuf/proto/decode.go
@@ -42,18 +42,25 @@ type UnmarshalOptions struct {
 		FindExtensionByName(field protoreflect.FullName) (protoreflect.ExtensionType, error)
 		FindExtensionByNumber(message protoreflect.FullName, field protoreflect.FieldNumber) (protoreflect.ExtensionType, error)
 	}
+
+	// RecursionLimit limits how deeply messages may be nested.
+	// If zero, a default limit is applied.
+	RecursionLimit int
 }
 
 // Unmarshal parses the wire-format message in b and places the result in m.
 // The provided message must be mutable (e.g., a non-nil pointer to a message).
 func Unmarshal(b []byte, m Message) error {
-	_, err := UnmarshalOptions{}.unmarshal(b, m.ProtoReflect())
+	_, err := UnmarshalOptions{RecursionLimit: protowire.DefaultRecursionLimit}.unmarshal(b, m.ProtoReflect())
 	return err
 }
 
 // Unmarshal parses the wire-format message in b and places the result in m.
 // The provided message must be mutable (e.g., a non-nil pointer to a message).
 func (o UnmarshalOptions) Unmarshal(b []byte, m Message) error {
+	if o.RecursionLimit == 0 {
+		o.RecursionLimit = protowire.DefaultRecursionLimit
+	}
 	_, err := o.unmarshal(b, m.ProtoReflect())
 	return err
 }
@@ -63,6 +70,9 @@ func (o UnmarshalOptions) Unmarshal(b []byte, m Message) error {
 // This method permits fine-grained control over the unmarshaler.
 // Most users should use Unmarshal instead.
 func (o UnmarshalOptions) UnmarshalState(in protoiface.UnmarshalInput) (protoiface.UnmarshalOutput, error) {
+	if o.RecursionLimit == 0 {
+		o.RecursionLimit = protowire.DefaultRecursionLimit
+	}
 	return o.unmarshal(in.Buf, in.Message)
 }
 
@@ -86,12 +96,17 @@ func (o UnmarshalOptions) unmarshal(b []byte, m protoreflect.Message) (out proto
 			Message:  m,
 			Buf:      b,
 			Resolver: o.Resolver,
+			Depth:    o.RecursionLimit,
 		}
 		if o.DiscardUnknown {
 			in.Flags |= protoiface.UnmarshalDiscardUnknown
 		}
 		out, err = methods.Unmarshal(in)
 	} else {
+		o.RecursionLimit--
+		if o.RecursionLimit < 0 {
+			return out, errors.New("exceeded max recursion depth")
+		}
 		err = o.unmarshalMessageSlow(b, m)
 	}
 	if err != nil {
diff --git a/vendor/google.golang.org/protobuf/proto/proto_methods.go b/vendor/google.golang.org/protobuf/proto/proto_methods.go
index d8dd604f6..465e057b3 100644
--- a/vendor/google.golang.org/protobuf/proto/proto_methods.go
+++ b/vendor/google.golang.org/protobuf/proto/proto_methods.go
@@ -3,6 +3,7 @@
 // license that can be found in the LICENSE file.
 
 // The protoreflect build tag disables use of fast-path methods.
+//go:build !protoreflect
 // +build !protoreflect
 
 package proto
diff --git a/vendor/google.golang.org/protobuf/proto/proto_reflect.go b/vendor/google.golang.org/protobuf/proto/proto_reflect.go
index b103d4320..494d6ceef 100644
--- a/vendor/google.golang.org/protobuf/proto/proto_reflect.go
+++ b/vendor/google.golang.org/protobuf/proto/proto_reflect.go
@@ -3,6 +3,7 @@
 // license that can be found in the LICENSE file.
 
 // The protoreflect build tag disables use of fast-path methods.
+//go:build protoreflect
 // +build protoreflect
 
 package proto
diff --git a/vendor/google.golang.org/protobuf/reflect/protoreflect/methods.go b/vendor/google.golang.org/protobuf/reflect/protoreflect/methods.go
index 6be5d16e9..d5d5af6eb 100644
--- a/vendor/google.golang.org/protobuf/reflect/protoreflect/methods.go
+++ b/vendor/google.golang.org/protobuf/reflect/protoreflect/methods.go
@@ -53,6 +53,7 @@ type (
 			FindExtensionByName(field FullName) (ExtensionType, error)
 			FindExtensionByNumber(message FullName, field FieldNumber) (ExtensionType, error)
 		}
+		Depth int
 	}
 	unmarshalOutput = struct {
 		pragma.NoUnkeyedLiterals
diff --git a/vendor/google.golang.org/protobuf/reflect/protoreflect/value_pure.go b/vendor/google.golang.org/protobuf/reflect/protoreflect/value_pure.go
index 918e685e1..7ced876f4 100644
--- a/vendor/google.golang.org/protobuf/reflect/protoreflect/value_pure.go
+++ b/vendor/google.golang.org/protobuf/reflect/protoreflect/value_pure.go
@@ -2,6 +2,7 @@
 // Use of this source code is governed by a BSD-style
 // license that can be found in the LICENSE file.
 
+//go:build purego || appengine
 // +build purego appengine
 
 package protoreflect
diff --git a/vendor/google.golang.org/protobuf/reflect/protoreflect/value_union.go b/vendor/google.golang.org/protobuf/reflect/protoreflect/value_union.go
index 5a3414724..eb7764c30 100644
--- a/vendor/google.golang.org/protobuf/reflect/protoreflect/value_union.go
+++ b/vendor/google.golang.org/protobuf/reflect/protoreflect/value_union.go
@@ -41,6 +41,31 @@ import (
 // Converting to/from a Value and a concrete Go value panics on type mismatch.
 // For example, ValueOf("hello").Int() panics because this attempts to
 // retrieve an int64 from a string.
+//
+// List, Map, and Message Values are called "composite" values.
+//
+// A composite Value may alias (reference) memory at some location,
+// such that changes to the Value updates the that location.
+// A composite value acquired with a Mutable method, such as Message.Mutable,
+// always references the source object.
+//
+// For example:
+//	// Append a 0 to a "repeated int32" field.
+//	// Since the Value returned by Mutable is guaranteed to alias
+//	// the source message, modifying the Value modifies the message.
+//	message.Mutable(fieldDesc).(List).Append(protoreflect.ValueOfInt32(0))
+//
+//	// Assign [0] to a "repeated int32" field by creating a new Value,
+//	// modifying it, and assigning it.
+//	list := message.NewField(fieldDesc).(List)
+//	list.Append(protoreflect.ValueOfInt32(0))
+//	message.Set(fieldDesc, list)
+//	// ERROR: Since it is not defined whether Set aliases the source,
+//	// appending to the List here may or may not modify the message.
+//	list.Append(protoreflect.ValueOfInt32(0))
+//
+// Some operations, such as Message.Get, may return an "empty, read-only"
+// composite Value. Modifying an empty, read-only value panics.
 type Value value
 
 // The protoreflect API uses a custom Value union type instead of interface{}
diff --git a/vendor/google.golang.org/protobuf/reflect/protoreflect/value_unsafe.go b/vendor/google.golang.org/protobuf/reflect/protoreflect/value_unsafe.go
index c45debdca..702ddf22a 100644
--- a/vendor/google.golang.org/protobuf/reflect/protoreflect/value_unsafe.go
+++ b/vendor/google.golang.org/protobuf/reflect/protoreflect/value_unsafe.go
@@ -2,6 +2,7 @@
 // Use of this source code is governed by a BSD-style
 // license that can be found in the LICENSE file.
 
+//go:build !purego && !appengine
 // +build !purego,!appengine
 
 package protoreflect
diff --git a/vendor/google.golang.org/protobuf/runtime/protoiface/methods.go b/vendor/google.golang.org/protobuf/runtime/protoiface/methods.go
index 32c04f67e..44cf467d8 100644
--- a/vendor/google.golang.org/protobuf/runtime/protoiface/methods.go
+++ b/vendor/google.golang.org/protobuf/runtime/protoiface/methods.go
@@ -103,6 +103,7 @@ type UnmarshalInput = struct {
 		FindExtensionByName(field protoreflect.FullName) (protoreflect.ExtensionType, error)
 		FindExtensionByNumber(message protoreflect.FullName, field protoreflect.FieldNumber) (protoreflect.ExtensionType, error)
 	}
+	Depth int
 }
 
 // UnmarshalOutput is output from the Unmarshal method.
diff --git a/vendor/modules.txt b/vendor/modules.txt
index d54878430..7fd82cfb0 100644
--- a/vendor/modules.txt
+++ b/vendor/modules.txt
@@ -1,5 +1,5 @@
-# cloud.google.com/go v0.99.0
-## explicit; go 1.11
+# cloud.google.com/go/compute v1.7.0
+## explicit; go 1.15
 cloud.google.com/go/compute/metadata
 # github.com/Azure/azure-sdk-for-go v59.3.0+incompatible
 ## explicit
@@ -254,6 +254,7 @@ github.com/gogo/protobuf/sortkeys
 github.com/golang/groupcache/lru
 # github.com/golang/protobuf v1.5.2
 ## explicit; go 1.9
+github.com/golang/protobuf/jsonpb
 github.com/golang/protobuf/proto
 github.com/golang/protobuf/ptypes
 github.com/golang/protobuf/ptypes/any
@@ -266,8 +267,8 @@ github.com/google/gnostic/extensions
 github.com/google/gnostic/jsonschema
 github.com/google/gnostic/openapiv2
 github.com/google/gnostic/openapiv3
-# github.com/google/go-cmp v0.5.6
-## explicit; go 1.8
+# github.com/google/go-cmp v0.5.8
+## explicit; go 1.13
 github.com/google/go-cmp/cmp
 github.com/google/go-cmp/cmp/internal/diff
 github.com/google/go-cmp/cmp/internal/flags
@@ -279,11 +280,19 @@ github.com/google/gofuzz
 # github.com/google/pprof v0.0.0-20210720184732-4bb14d4b1be1
 ## explicit; go 1.14
 github.com/google/pprof/profile
-# github.com/googleapis/gax-go/v2 v2.1.1
-## explicit; go 1.11
+# github.com/google/uuid v1.3.0
+## explicit
+github.com/google/uuid
+# github.com/googleapis/enterprise-certificate-proxy v0.1.0
+## explicit; go 1.18
+github.com/googleapis/enterprise-certificate-proxy/client
+github.com/googleapis/enterprise-certificate-proxy/client/util
+# github.com/googleapis/gax-go/v2 v2.4.0
+## explicit; go 1.15
 github.com/googleapis/gax-go/v2
 github.com/googleapis/gax-go/v2/apierror
 github.com/googleapis/gax-go/v2/apierror/internal/proto
+github.com/googleapis/gax-go/v2/internal
 # github.com/gophercloud/gophercloud v0.24.0
 ## explicit; go 1.14
 github.com/gophercloud/gophercloud
@@ -487,7 +496,7 @@ golang.org/x/lint/golint
 golang.org/x/mod/internal/lazyregexp
 golang.org/x/mod/module
 golang.org/x/mod/semver
-# golang.org/x/net v0.0.0-20220225172249-27dd8689420f
+# golang.org/x/net v0.0.0-20220624214902-1bab6f366d9e
 ## explicit; go 1.17
 golang.org/x/net/bpf
 golang.org/x/net/context
@@ -506,8 +515,8 @@ golang.org/x/net/ipv4
 golang.org/x/net/ipv6
 golang.org/x/net/publicsuffix
 golang.org/x/net/trace
-# golang.org/x/oauth2 v0.0.0-20211104180415-d3ed0bb246c8
-## explicit; go 1.11
+# golang.org/x/oauth2 v0.0.0-20220622183110-fd043fe589d2
+## explicit; go 1.15
 golang.org/x/oauth2
 golang.org/x/oauth2/authhandler
 golang.org/x/oauth2/google
@@ -515,10 +524,10 @@ golang.org/x/oauth2/google/internal/externalaccount
 golang.org/x/oauth2/internal
 golang.org/x/oauth2/jws
 golang.org/x/oauth2/jwt
-# golang.org/x/sync v0.0.0-20210220032951-036812b2e83c
+# golang.org/x/sync v0.0.0-20220601150217-0de741cfad7f
 ## explicit
 golang.org/x/sync/semaphore
-# golang.org/x/sys v0.0.0-20220412211240-33da011f77ad
+# golang.org/x/sys v0.0.0-20220624220833-87e55d714810
 ## explicit; go 1.17
 golang.org/x/sys/execabs
 golang.org/x/sys/internal/unsafeheader
@@ -574,12 +583,12 @@ golang.org/x/tools/internal/imports
 golang.org/x/tools/internal/packagesinternal
 golang.org/x/tools/internal/typeparams
 golang.org/x/tools/internal/typesinternal
-# golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1
-## explicit; go 1.11
+# golang.org/x/xerrors v0.0.0-20220609144429-65e65417b02f
+## explicit; go 1.17
 golang.org/x/xerrors
 golang.org/x/xerrors/internal
-# google.golang.org/api v0.63.0
-## explicit; go 1.11
+# google.golang.org/api v0.88.0
+## explicit; go 1.15
 google.golang.org/api/dns/v1
 google.golang.org/api/googleapi
 google.golang.org/api/googleapi/transport
@@ -605,12 +614,12 @@ google.golang.org/appengine/internal/modules
 google.golang.org/appengine/internal/remote_api
 google.golang.org/appengine/internal/urlfetch
 google.golang.org/appengine/urlfetch
-# google.golang.org/genproto v0.0.0-20220107163113-42d7afdf6368
-## explicit; go 1.11
+# google.golang.org/genproto v0.0.0-20220624142145-8cd45d7dbd1f
+## explicit; go 1.15
 google.golang.org/genproto/googleapis/rpc/code
 google.golang.org/genproto/googleapis/rpc/errdetails
 google.golang.org/genproto/googleapis/rpc/status
-# google.golang.org/grpc v1.41.0
+# google.golang.org/grpc v1.47.0
 ## explicit; go 1.14
 google.golang.org/grpc
 google.golang.org/grpc/attributes
@@ -620,14 +629,17 @@ google.golang.org/grpc/balancer/base
 google.golang.org/grpc/balancer/grpclb/state
 google.golang.org/grpc/balancer/roundrobin
 google.golang.org/grpc/binarylog/grpc_binarylog_v1
+google.golang.org/grpc/channelz
 google.golang.org/grpc/codes
 google.golang.org/grpc/connectivity
 google.golang.org/grpc/credentials
+google.golang.org/grpc/credentials/insecure
 google.golang.org/grpc/encoding
 google.golang.org/grpc/encoding/proto
 google.golang.org/grpc/grpclog
 google.golang.org/grpc/internal
 google.golang.org/grpc/internal/backoff
+google.golang.org/grpc/internal/balancer/gracefulswitch
 google.golang.org/grpc/internal/balancerload
 google.golang.org/grpc/internal/binarylog
 google.golang.org/grpc/internal/buffer
@@ -639,6 +651,7 @@ google.golang.org/grpc/internal/grpcrand
 google.golang.org/grpc/internal/grpcsync
 google.golang.org/grpc/internal/grpcutil
 google.golang.org/grpc/internal/metadata
+google.golang.org/grpc/internal/pretty
 google.golang.org/grpc/internal/resolver
 google.golang.org/grpc/internal/resolver/dns
 google.golang.org/grpc/internal/resolver/passthrough
@@ -648,7 +661,6 @@ google.golang.org/grpc/internal/status
 google.golang.org/grpc/internal/syscall
 google.golang.org/grpc/internal/transport
 google.golang.org/grpc/internal/transport/networktype
-google.golang.org/grpc/internal/xds/env
 google.golang.org/grpc/keepalive
 google.golang.org/grpc/metadata
 google.golang.org/grpc/peer
@@ -657,8 +669,8 @@ google.golang.org/grpc/serviceconfig
 google.golang.org/grpc/stats
 google.golang.org/grpc/status
 google.golang.org/grpc/tap
-# google.golang.org/protobuf v1.27.1
-## explicit; go 1.9
+# google.golang.org/protobuf v1.28.0
+## explicit; go 1.11
 google.golang.org/protobuf/encoding/protojson
 google.golang.org/protobuf/encoding/prototext
 google.golang.org/protobuf/encoding/protowire