From c948912eff0b2337496b3d77520a327d11e0131d Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 24 Jul 2023 23:14:42 +0000 Subject: [PATCH] Bump github.com/containers/common from 0.53.0 to 0.55.2 Bumps [github.com/containers/common](https://github.com/containers/common) from 0.53.0 to 0.55.2. - [Release notes](https://github.com/containers/common/releases) - [Commits](https://github.com/containers/common/compare/v0.53.0...v0.55.2) --- updated-dependencies: - dependency-name: github.com/containers/common dependency-type: direct:production update-type: version-update:semver-minor ... Signed-off-by: dependabot[bot] --- go.mod | 48 +- go.sum | 99 +- .../mergo/.deepsource.toml | 2 +- .../imdario => dario.cat}/mergo/.gitignore | 0 .../imdario => dario.cat}/mergo/.travis.yml | 0 .../mergo/CODE_OF_CONDUCT.md | 0 .../mergo/CONTRIBUTING.md | 0 .../imdario => dario.cat}/mergo/LICENSE | 0 .../imdario => dario.cat}/mergo/README.md | 42 +- .../imdario => dario.cat}/mergo/SECURITY.md | 0 .../imdario => dario.cat}/mergo/doc.go | 43 +- .../imdario => dario.cat}/mergo/map.go | 0 .../imdario => dario.cat}/mergo/merge.go | 0 .../imdario => dario.cat}/mergo/mergo.go | 0 .../github.com/Azure/go-ansiterm/SECURITY.md | 41 + vendor/github.com/Microsoft/hcsshim/Makefile | 18 +- .../Microsoft/hcsshim/internal/log/format.go | 85 ++ .../Microsoft/hcsshim/internal/log/hook.go | 145 ++- .../Microsoft/hcsshim/internal/log/scrub.go | 22 +- .../Microsoft/hcsshim/internal/oc/errors.go | 69 ++ .../Microsoft/hcsshim/internal/oc/exporter.go | 77 +- .../Microsoft/hcsshim/internal/oc/span.go | 14 +- .../internal/security/grantvmgroupaccess.go | 14 +- .../containers/common/libimage/filters.go | 18 +- .../containers/common/libimage/history.go | 8 +- .../containers/common/libimage/image.go | 76 +- .../containers/common/libimage/import.go | 12 +- .../containers/common/libimage/layer_tree.go | 29 + .../common/libimage/manifest_list.go | 15 +- .../common/libimage/manifests/manifests.go | 2 +- .../containers/common/libimage/normalize.go | 10 +- .../containers/common/libimage/pull.go | 13 +- .../containers/common/libimage/runtime.go | 142 +-- .../common/libnetwork/types/const.go | 12 +- .../common/libnetwork/types/define.go | 9 +- .../common/libnetwork/types/network.go | 33 + .../common/libnetwork/util/filters.go | 2 +- .../containers/common/libnetwork/util/ip.go | 22 + .../common/pkg/cgroups/cpuset_linux.go | 2 +- .../containers/common/pkg/config/config.go | 17 + .../common/pkg/config/config_darwin.go | 4 +- .../common/pkg/config/containers.conf | 42 +- .../common/pkg/config/containers.conf-freebsd | 7 + .../containers/common/pkg/config/default.go | 16 +- .../common/pkg/config/default_common.go | 7 + .../common/pkg/config/default_freebsd.go | 3 + .../common/pkg/manifests/manifests.go | 3 +- .../containers/common/pkg/retry/retry.go | 29 +- .../common/pkg/supplemented/supplemented.go | 2 +- .../containers/common/pkg/util/util.go | 167 +++- .../containers/common/version/version.go | 2 +- .../containers/image/v5/copy/blob.go | 4 +- .../containers/image/v5/copy/encryption.go | 92 +- .../containers/image/v5/copy/manifest.go | 44 +- .../containers/image/v5/copy/multiple.go | 85 +- .../containers/image/v5/copy/single.go | 8 +- .../image/v5/docker/daemon/client.go | 52 +- .../image/v5/docker/docker_client.go | 15 +- .../image/v5/internal/image/docker_schema2.go | 4 +- .../containers/image/v5/internal/image/oci.go | 6 +- .../internal/manifest/docker_schema2_list.go | 74 +- 
.../image/v5/internal/manifest/list.go | 35 + .../image/v5/internal/manifest/oci_index.go | 132 ++- .../containers/image/v5/internal/set/set.go | 4 +- .../containers/image/v5/manifest/oci.go | 16 +- .../image/v5/oci/layout/oci_transport.go | 12 +- .../image/v5/openshift/openshift-copies.go | 4 +- .../image/v5/pkg/docker/config/config.go | 124 +-- .../github.com/containers/image/v5/sif/src.go | 10 +- .../image/v5/tarball/tarball_src.go | 161 ++-- .../containers/image/v5/types/types.go | 4 +- .../containers/image/v5/version/version.go | 4 +- vendor/github.com/go-logr/logr/.golangci.yaml | 3 - vendor/github.com/go-logr/logr/discard.go | 32 +- vendor/github.com/go-logr/logr/funcr/funcr.go | 27 +- vendor/github.com/go-logr/logr/logr.go | 166 ++-- vendor/github.com/go-openapi/swag/util.go | 16 +- .../go-containerregistry/pkg/name/registry.go | 6 + .../github.com/google/pprof/profile/encode.go | 85 +- .../github.com/google/pprof/profile/filter.go | 4 + .../google/pprof/profile/legacy_profile.go | 31 +- .../github.com/google/pprof/profile/merge.go | 278 +++++- .../google/pprof/profile/profile.go | 61 +- .../github.com/google/pprof/profile/proto.go | 19 +- .../github.com/google/pprof/profile/prune.go | 26 +- vendor/github.com/moby/term/doc.go | 3 + vendor/github.com/moby/term/tc.go | 20 - vendor/github.com/moby/term/term.go | 116 +-- vendor/github.com/moby/term/term_unix.go | 98 ++ vendor/github.com/moby/term/term_windows.go | 99 +- .../moby/term/{termios.go => termios_unix.go} | 13 +- .../github.com/moby/term/termios_windows.go | 37 + .../moby/term/windows/ansi_reader.go | 4 +- .../github.com/moby/term/windows/console.go | 7 +- vendor/github.com/moby/term/winsize.go | 21 - vendor/github.com/onsi/ginkgo/v2/.gitignore | 2 +- vendor/github.com/onsi/ginkgo/v2/CHANGELOG.md | 83 ++ .../v2/ginkgo/generators/generate_command.go | 5 + .../ginkgo/generators/generate_templates.go | 6 +- .../v2/ginkgo/generators/generators_common.go | 12 + .../github.com/onsi/ginkgo/v2/ginkgo_t_dsl.go | 3 + .../onsi/ginkgo/v2/internal/focus.go | 71 +- .../interrupt_handler/interrupt_handler.go | 47 +- .../onsi/ginkgo/v2/internal/node.go | 9 + .../onsi/ginkgo/v2/internal/ordering.go | 51 +- .../v2/internal/output_interceptor_unix.go | 11 + .../onsi/ginkgo/v2/internal/suite.go | 8 + .../internal/testingtproxy/testing_t_proxy.go | 4 + .../onsi/ginkgo/v2/internal/writer.go | 2 +- .../onsi/ginkgo/v2/reporters/json_report.go | 13 +- .../onsi/ginkgo/v2/reporters/junit_report.go | 13 +- .../ginkgo/v2/reporters/teamcity_report.go | 4 + .../ginkgo/v2/types/deprecation_support.go | 2 +- .../onsi/ginkgo/v2/types/version.go | 2 +- vendor/github.com/onsi/gomega/.gitignore | 2 +- vendor/github.com/onsi/gomega/CHANGELOG.md | 24 + vendor/github.com/onsi/gomega/gomega_dsl.go | 2 +- .../gomega/matchers/have_exact_elements.go | 7 +- .../image-spec/specs-go/v1/annotations.go | 3 - .../image-spec/specs-go/v1/artifact.go | 34 - .../image-spec/specs-go/v1/config.go | 34 +- .../image-spec/specs-go/v1/manifest.go | 11 + .../image-spec/specs-go/v1/mediatype.go | 19 +- .../image-spec/specs-go/version.go | 2 +- .../fulcio/pkg/certificate/extensions.go | 34 +- .../sigstore/pkg/signature/payload/payload.go | 23 +- .../sylabs/sif/v2/pkg/sif/create.go | 54 +- vendor/github.com/vbauerster/mpb/v8/README.md | 4 +- .../vbauerster/mpb/v8/decor/counters.go | 188 ++-- .../github.com/vbauerster/mpb/v8/decor/eta.go | 3 +- .../vbauerster/mpb/v8/decor/on_abort.go | 3 +- .../vbauerster/mpb/v8/decor/on_complete.go | 3 +- .../vbauerster/mpb/v8/decor/percentage.go | 30 +- 
.../vbauerster/mpb/v8/decor/pool.go | 10 - .../vbauerster/mpb/v8/decor/size_type.go | 43 +- .../vbauerster/mpb/v8/decor/speed.go | 52 +- .../vbauerster/mpb/v8/heap_manager.go | 7 +- .../github.com/vbauerster/mpb/v8/progress.go | 4 +- vendor/golang.org/x/exp/slices/slices.go | 28 +- vendor/golang.org/x/exp/slices/sort.go | 10 +- vendor/golang.org/x/net/http2/server.go | 9 +- vendor/golang.org/x/net/http2/transport.go | 21 +- vendor/golang.org/x/net/http2/writesched.go | 3 +- .../x/net/http2/writesched_roundrobin.go | 119 +++ vendor/golang.org/x/sync/errgroup/errgroup.go | 10 +- vendor/golang.org/x/sync/errgroup/go120.go | 14 + .../golang.org/x/sync/errgroup/pre_go120.go | 15 + .../x/tools/go/gcexportdata/gcexportdata.go | 11 +- .../golang.org/x/tools/go/packages/golist.go | 23 +- .../x/tools/go/packages/packages.go | 3 + .../x/tools/go/types/objectpath/objectpath.go | 762 --------------- .../x/tools/internal/event/tag/tag.go | 59 ++ .../x/tools/internal/gcimporter/bexport.go | 852 ---------------- .../x/tools/internal/gcimporter/bimport.go | 907 +----------------- .../x/tools/internal/gcimporter/gcimporter.go | 15 +- .../x/tools/internal/gcimporter/iexport.go | 27 +- .../x/tools/internal/gcimporter/iimport.go | 43 +- .../tools/internal/gcimporter/ureader_yes.go | 9 + .../x/tools/internal/gocommand/invoke.go | 146 ++- .../x/tools/internal/gocommand/version.go | 18 +- .../internal/tokeninternal/tokeninternal.go | 92 ++ .../x/tools/internal/typesinternal/types.go | 9 - vendor/modules.txt | 64 +- 163 files changed, 3579 insertions(+), 4119 deletions(-) rename vendor/{github.com/imdario => dario.cat}/mergo/.deepsource.toml (72%) rename vendor/{github.com/imdario => dario.cat}/mergo/.gitignore (100%) rename vendor/{github.com/imdario => dario.cat}/mergo/.travis.yml (100%) rename vendor/{github.com/imdario => dario.cat}/mergo/CODE_OF_CONDUCT.md (100%) rename vendor/{github.com/imdario => dario.cat}/mergo/CONTRIBUTING.md (100%) rename vendor/{github.com/imdario => dario.cat}/mergo/LICENSE (100%) rename vendor/{github.com/imdario => dario.cat}/mergo/README.md (89%) rename vendor/{github.com/imdario => dario.cat}/mergo/SECURITY.md (100%) rename vendor/{github.com/imdario => dario.cat}/mergo/doc.go (88%) rename vendor/{github.com/imdario => dario.cat}/mergo/map.go (100%) rename vendor/{github.com/imdario => dario.cat}/mergo/merge.go (100%) rename vendor/{github.com/imdario => dario.cat}/mergo/mergo.go (100%) create mode 100644 vendor/github.com/Azure/go-ansiterm/SECURITY.md create mode 100644 vendor/github.com/Microsoft/hcsshim/internal/log/format.go create mode 100644 vendor/github.com/Microsoft/hcsshim/internal/oc/errors.go create mode 100644 vendor/github.com/containers/common/pkg/config/default_common.go create mode 100644 vendor/github.com/moby/term/doc.go delete mode 100644 vendor/github.com/moby/term/tc.go create mode 100644 vendor/github.com/moby/term/term_unix.go rename vendor/github.com/moby/term/{termios.go => termios_unix.go} (50%) create mode 100644 vendor/github.com/moby/term/termios_windows.go delete mode 100644 vendor/github.com/moby/term/winsize.go delete mode 100644 vendor/github.com/opencontainers/image-spec/specs-go/v1/artifact.go delete mode 100644 vendor/github.com/vbauerster/mpb/v8/decor/pool.go create mode 100644 vendor/golang.org/x/net/http2/writesched_roundrobin.go create mode 100644 vendor/golang.org/x/sync/errgroup/go120.go create mode 100644 vendor/golang.org/x/sync/errgroup/pre_go120.go delete mode 100644 vendor/golang.org/x/tools/go/types/objectpath/objectpath.go create 
mode 100644 vendor/golang.org/x/tools/internal/event/tag/tag.go delete mode 100644 vendor/golang.org/x/tools/internal/gcimporter/bexport.go diff --git a/go.mod b/go.mod index 04b75aae4..031ac1276 100644 --- a/go.mod +++ b/go.mod @@ -5,7 +5,7 @@ go 1.18 require ( github.com/BurntSushi/toml v1.3.2 github.com/containers/buildah v1.30.0 - github.com/containers/common v0.53.0 + github.com/containers/common v0.55.2 github.com/containers/podman/v4 v4.5.1 github.com/containers/storage v1.48.0 github.com/docker/distribution v2.8.2+incompatible @@ -15,8 +15,8 @@ require ( github.com/hashicorp/go-multierror v1.1.1 github.com/hinshun/vt10x v0.0.0-20220301184237-5011da428d02 github.com/navidys/tvxwidgets v0.3.0 - github.com/onsi/ginkgo/v2 v2.9.2 - github.com/onsi/gomega v1.27.6 + github.com/onsi/ginkgo/v2 v2.11.0 + github.com/onsi/gomega v1.27.8 github.com/pkg/errors v0.9.1 github.com/rivo/tview v0.0.0-20220307222120-9994674d60a8 github.com/rs/zerolog v1.29.1 @@ -26,9 +26,10 @@ require ( ) require ( - github.com/Azure/go-ansiterm v0.0.0-20210617225240-d185dfc1b5a1 // indirect + dario.cat/mergo v1.0.0 // indirect + github.com/Azure/go-ansiterm v0.0.0-20230124172434-306776ec8161 // indirect github.com/Microsoft/go-winio v0.6.1 // indirect - github.com/Microsoft/hcsshim v0.10.0-rc.7 // indirect + github.com/Microsoft/hcsshim v0.10.0-rc.8 // indirect github.com/VividCortex/ewma v1.2.0 // indirect github.com/acarl005/stripansi v0.0.0-20180116102854-5a71ef0e047d // indirect github.com/asaskevich/govalidator v0.0.0-20230301143203-a9d515a09cc2 // indirect @@ -36,21 +37,21 @@ require ( github.com/chzyer/readline v1.5.1 // indirect github.com/container-orchestrated-devices/container-device-interface v0.5.4 // indirect github.com/containerd/cgroups v1.1.0 // indirect - github.com/containerd/containerd v1.7.0 // indirect + github.com/containerd/containerd v1.7.2 // indirect github.com/containerd/stargz-snapshotter/estargz v0.14.3 // indirect - github.com/containers/image/v5 v5.25.0 // indirect + github.com/containers/image/v5 v5.26.1 // indirect github.com/containers/libtrust v0.0.0-20230121012942-c1716e8a8d01 // indirect github.com/containers/ocicrypt v1.1.7 // indirect github.com/containers/psgo v1.8.0 // indirect github.com/coreos/go-systemd/v22 v22.5.0 // indirect - github.com/cyberphone/json-canonicalization v0.0.0-20220623050100-57a0ce2678a7 // indirect + github.com/cyberphone/json-canonicalization v0.0.0-20230514072755-504adb8a8af1 // indirect github.com/cyphar/filepath-securejoin v0.2.3 // indirect github.com/disiqueira/gotree/v3 v3.0.2 // indirect github.com/docker/docker-credential-helpers v0.7.0 // indirect github.com/docker/go-connections v0.4.1-0.20210727194412-58542c764a11 // indirect github.com/fsnotify/fsnotify v1.6.0 // indirect github.com/gdamore/encoding v1.0.0 // indirect - github.com/go-logr/logr v1.2.3 // indirect + github.com/go-logr/logr v1.2.4 // indirect github.com/go-openapi/analysis v0.21.4 // indirect github.com/go-openapi/errors v0.20.3 // indirect github.com/go-openapi/jsonpointer v0.19.5 // indirect @@ -59,7 +60,7 @@ require ( github.com/go-openapi/runtime v0.26.0 // indirect github.com/go-openapi/spec v0.20.9 // indirect github.com/go-openapi/strfmt v0.21.7 // indirect - github.com/go-openapi/swag v0.22.3 // indirect + github.com/go-openapi/swag v0.22.4 // indirect github.com/go-openapi/validate v0.22.1 // indirect github.com/go-task/slim-sprig v0.0.0-20230315185526-52ccab3ef572 // indirect github.com/godbus/dbus/v5 v5.1.1-0.20221029134443-4b691ce883d5 // indirect @@ -67,14 +68,13 
@@ require ( github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect github.com/golang/protobuf v1.5.3 // indirect github.com/google/go-cmp v0.5.9 // indirect - github.com/google/go-containerregistry v0.14.0 // indirect + github.com/google/go-containerregistry v0.15.2 // indirect github.com/google/go-intervals v0.0.2 // indirect - github.com/google/pprof v0.0.0-20210407192527-94a9f03dee38 // indirect + github.com/google/pprof v0.0.0-20230323073829-e72429f035bd // indirect github.com/google/uuid v1.3.0 // indirect github.com/gorilla/mux v1.8.0 // indirect github.com/gorilla/schema v1.2.0 // indirect github.com/hashicorp/errwrap v1.1.0 // indirect - github.com/imdario/mergo v0.3.15 // indirect github.com/inconshreveable/mousetrap v1.1.0 // indirect github.com/jinzhu/copier v0.3.5 // indirect github.com/josharian/intern v1.0.0 // indirect @@ -94,13 +94,13 @@ require ( github.com/mistifyio/go-zfs/v3 v3.0.1 // indirect github.com/mitchellh/mapstructure v1.5.0 // indirect github.com/moby/sys/mountinfo v0.6.2 // indirect - github.com/moby/term v0.0.0-20221120202655-abb19827d345 // indirect + github.com/moby/term v0.5.0 // indirect github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect github.com/modern-go/reflect2 v1.0.2 // indirect github.com/nxadm/tail v1.4.8 // indirect github.com/oklog/ulid v1.3.1 // indirect github.com/opencontainers/go-digest v1.0.0 // indirect - github.com/opencontainers/image-spec v1.1.0-rc2.0.20221005185240-3a7f492d3f1b // indirect + github.com/opencontainers/image-spec v1.1.0-rc3 // indirect github.com/opencontainers/runc v1.1.7 // indirect github.com/opencontainers/runtime-spec v1.1.0-rc.3 // indirect github.com/opencontainers/runtime-tools v0.9.1-0.20230317050512-e931285f4b69 // indirect @@ -109,31 +109,31 @@ require ( github.com/pkg/sftp v1.13.5 // indirect github.com/proglottis/gpgme v0.1.3 // indirect github.com/rivo/uniseg v0.4.4 // indirect - github.com/sigstore/fulcio v1.2.0 // indirect - github.com/sigstore/rekor v1.2.0 // indirect - github.com/sigstore/sigstore v1.6.4 // indirect + github.com/sigstore/fulcio v1.3.1 // indirect + github.com/sigstore/rekor v1.2.2-0.20230601122533-4c81ff246d12 // indirect + github.com/sigstore/sigstore v1.7.1 // indirect github.com/spf13/pflag v1.0.5 // indirect github.com/stefanberger/go-pkcs11uri v0.0.0-20201008174630-78d3cae3a980 // indirect - github.com/sylabs/sif/v2 v2.11.1 // indirect + github.com/sylabs/sif/v2 v2.11.5 // indirect github.com/syndtr/gocapability v0.0.0-20200815063812-42c35b437635 // indirect github.com/tchap/go-patricia/v2 v2.3.1 // indirect github.com/theupdateframework/go-tuf v0.5.2 // indirect github.com/titanous/rocacheck v0.0.0-20171023193734-afe73141d399 // indirect github.com/ulikunitz/xz v0.5.11 // indirect github.com/vbatts/tar-split v0.11.3 // indirect - github.com/vbauerster/mpb/v8 v8.3.0 // indirect + github.com/vbauerster/mpb/v8 v8.4.0 // indirect go.etcd.io/bbolt v1.3.7 // indirect go.mongodb.org/mongo-driver v1.11.3 // indirect go.mozilla.org/pkcs7 v0.0.0-20210826202110-33d05740a352 // indirect go.opencensus.io v0.24.0 // indirect - golang.org/x/exp v0.0.0-20230321023759-10a507213a29 // indirect + golang.org/x/exp v0.0.0-20230522175609-2e198f4a06a1 // indirect golang.org/x/mod v0.10.0 // indirect - golang.org/x/net v0.10.0 // indirect - golang.org/x/sync v0.2.0 // indirect + golang.org/x/net v0.11.0 // indirect + golang.org/x/sync v0.3.0 // indirect golang.org/x/sys v0.9.0 // indirect golang.org/x/term v0.9.0 // indirect golang.org/x/text v0.10.0 // 
indirect - golang.org/x/tools v0.7.0 // indirect + golang.org/x/tools v0.9.3 // indirect google.golang.org/genproto v0.0.0-20230410155749-daa745c078e1 // indirect google.golang.org/grpc v1.55.0 // indirect google.golang.org/protobuf v1.30.0 // indirect diff --git a/go.sum b/go.sum index dd137d16b..1bb8e4547 100644 --- a/go.sum +++ b/go.sum @@ -22,12 +22,14 @@ cloud.google.com/go/pubsub v1.2.0/go.mod h1:jhfEVHT8odbXTkndysNHCcx0awwzvfOlguIA cloud.google.com/go/storage v1.0.0/go.mod h1:IhtSnM/ZTZV8YYJWCY8RULGVqBDmpoyjwiyrjsg+URw= cloud.google.com/go/storage v1.5.0/go.mod h1:tpKbwo567HUNpVclU5sGELwQWBDZ8gh0ZeosJ0Rtdos= cloud.google.com/go/storage v1.6.0/go.mod h1:N7U0C8pVQ/+NIKOBQyamJIeKQKkZ+mxpohlUTyfDhBk= +dario.cat/mergo v1.0.0 h1:AGCNq9Evsj31mOgNPcLyXc+4PNABt905YmuqPYYpBWk= +dario.cat/mergo v1.0.0/go.mod h1:uNxQE+84aUszobStD9th8a29P2fMDhsBdgRYvZOxGmk= dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU= github.com/14rcole/gopopulate v0.0.0-20180821133914-b175b219e774 h1:SCbEWT58NSt7d2mcFdvxC9uyrdcTfvBbPLThhkDmXzg= github.com/Azure/azure-sdk-for-go v16.2.1+incompatible/go.mod h1:9XXNKU+eRnpl9moKnB4QOLf1HestfXbmab5FXxiDBjc= github.com/Azure/go-ansiterm v0.0.0-20170929234023-d6e3b3328b78/go.mod h1:LmzpDX56iTiv29bbRTIsUNlaFfuhWRQBWjQdVyAevI8= -github.com/Azure/go-ansiterm v0.0.0-20210617225240-d185dfc1b5a1 h1:UQHMgLO+TxOElx5B5HZ4hJQsoJ/PvUvKRhJHDQXO8P8= -github.com/Azure/go-ansiterm v0.0.0-20210617225240-d185dfc1b5a1/go.mod h1:xomTg63KZ2rFqZQzSB4Vz2SUXa1BpHTVz9L5PTmPC4E= +github.com/Azure/go-ansiterm v0.0.0-20230124172434-306776ec8161 h1:L/gRVlceqvL25UVaW/CKtUDjefjrs0SPonmDGUVOYP0= +github.com/Azure/go-ansiterm v0.0.0-20230124172434-306776ec8161/go.mod h1:xomTg63KZ2rFqZQzSB4Vz2SUXa1BpHTVz9L5PTmPC4E= github.com/Azure/go-autorest v10.8.1+incompatible/go.mod h1:r+4oMnoxhatjLLJ6zxSWATqVooLgysK6ZNox3g/xq24= github.com/Azure/go-autorest v14.2.0+incompatible/go.mod h1:r+4oMnoxhatjLLJ6zxSWATqVooLgysK6ZNox3g/xq24= github.com/Azure/go-autorest/autorest v0.11.1/go.mod h1:JFgpikqFJ/MleTTxwepExTKnFUKKszPS8UavbQYUMuw= @@ -64,8 +66,8 @@ github.com/Microsoft/hcsshim v0.8.15/go.mod h1:x38A4YbHbdxJtc0sF6oIz+RG0npwSCAvn github.com/Microsoft/hcsshim v0.8.16/go.mod h1:o5/SZqmR7x9JNKsW3pu+nqHm0MF8vbA+VxGOoXdC600= github.com/Microsoft/hcsshim v0.8.21/go.mod h1:+w2gRZ5ReXQhFOrvSQeNfhrYB/dg3oDwTOcER2fw4I4= github.com/Microsoft/hcsshim v0.9.4/go.mod h1:7pLA8lDk46WKDWlVsENo92gC0XFa8rbKfyFRBqxEbCc= -github.com/Microsoft/hcsshim v0.10.0-rc.7 h1:HBytQPxcv8Oy4244zbQbe6hnOnx544eL5QPUqhJldz8= -github.com/Microsoft/hcsshim v0.10.0-rc.7/go.mod h1:ILuwjA+kNW+MrN/w5un7n3mTqkwsFu4Bp05/okFUZlE= +github.com/Microsoft/hcsshim v0.10.0-rc.8 h1:YSZVvlIIDD1UxQpJp0h+dnpLUw+TrY0cx8obKsp3bek= +github.com/Microsoft/hcsshim v0.10.0-rc.8/go.mod h1:OEthFdQv/AD2RAdzR6Mm1N1KPCztGKDurW1Z8b8VGMM= github.com/Microsoft/hcsshim/test v0.0.0-20201218223536-d3e5debf77da/go.mod h1:5hlzMzRKMLyo42nCZ9oml8AdTlq/0cvIaBv6tK1RehU= github.com/Microsoft/hcsshim/test v0.0.0-20210227013316-43a75bb4edd3/go.mod h1:mw7qgWloBUl75W/gVH3cQszUg1+gUITj7D6NY7ywVnY= github.com/NYTimes/gziphandler v0.0.0-20170623195520-56545f4a5d46/go.mod h1:3wb06e3pkSAbeQ52E9H9iFoQsEEwGN64994WTCIhntQ= @@ -173,8 +175,8 @@ github.com/containerd/containerd v1.5.0-beta.4/go.mod h1:GmdgZd2zA2GYIBZ0w09Zvgq github.com/containerd/containerd v1.5.0-rc.0/go.mod h1:V/IXoMqNGgBlabz3tHD2TWDoTJseu1FGOKuoA4nNb2s= github.com/containerd/containerd v1.5.1/go.mod h1:0DOxVqwDy2iZvrZp2JUx/E+hS0UNTVn7dJnIOwtYR4g= github.com/containerd/containerd 
v1.5.7/go.mod h1:gyvv6+ugqY25TiXxcZC3L5yOeYgEw0QMhscqVp1AR9c= -github.com/containerd/containerd v1.7.0 h1:G/ZQr3gMZs6ZT0qPUZ15znx5QSdQdASW11nXTLTM2Pg= -github.com/containerd/containerd v1.7.0/go.mod h1:QfR7Efgb/6X2BDpTPJRvPTYDE9rsF0FsXX9J8sIs/sc= +github.com/containerd/containerd v1.7.2 h1:UF2gdONnxO8I6byZXDi5sXWiWvlW3D/sci7dTQimEJo= +github.com/containerd/containerd v1.7.2/go.mod h1:afcz74+K10M/+cjGHIVQrCt3RAQhUSCAjJ9iMYhhkuI= github.com/containerd/continuity v0.0.0-20190426062206-aaeac12a7ffc/go.mod h1:GL3xCUCBDV3CZiTSEKksMWbLE66hEyuu9qyDOOqM47Y= github.com/containerd/continuity v0.0.0-20190815185530-f2a389ac0a02/go.mod h1:GL3xCUCBDV3CZiTSEKksMWbLE66hEyuu9qyDOOqM47Y= github.com/containerd/continuity v0.0.0-20191127005431-f65d91d395eb/go.mod h1:GL3xCUCBDV3CZiTSEKksMWbLE66hEyuu9qyDOOqM47Y= @@ -228,10 +230,10 @@ github.com/containernetworking/plugins v0.8.6/go.mod h1:qnw5mN19D8fIwkqW7oHHYDHV github.com/containernetworking/plugins v0.9.1/go.mod h1:xP/idU2ldlzN6m4p5LmGiwRDjeJr6FLK6vuiUwoH7P8= github.com/containers/buildah v1.30.0 h1:mdp2COGKFFEZNEGP8VZ5ITuUFVNPFoH+iK2sSesNfTA= github.com/containers/buildah v1.30.0/go.mod h1:lyMLZIevpAa6zSzjRl7z4lFJMCMQLFjfo56YIefaB/U= -github.com/containers/common v0.53.0 h1:Ax814cLeX5VXSnkKUdxz762g+27fJj1st4UvKoXmkKs= -github.com/containers/common v0.53.0/go.mod h1:pABPxJwlTE8oYk9/2BW0e0mumkuhJHIPsABHTGRXN3w= -github.com/containers/image/v5 v5.25.0 h1:TJ0unmalbU+scd0i3Txap2wjGsAnv06MSCwgn6bsizk= -github.com/containers/image/v5 v5.25.0/go.mod h1:EKvys0WVlRFkDw26R8y52TuhV9Tfn0yq2luLX6W52Ls= +github.com/containers/common v0.55.2 h1:Cd+vmkUPDrPvL2v4Te1Wew6SIZdn4/XiyiBRT9IbcGg= +github.com/containers/common v0.55.2/go.mod h1:ZKPllYOZ2xj2rgWRdnHHVvWg6ru4BT28En8mO8DMMPk= +github.com/containers/image/v5 v5.26.1 h1:8y3xq8GO/6y8FR+nAedHPsAFiAtOrab9qHTBpbqaX8g= +github.com/containers/image/v5 v5.26.1/go.mod h1:IwlOGzTkGnmfirXxt0hZeJlzv1zVukE03WZQ203Z9GA= github.com/containers/libtrust v0.0.0-20230121012942-c1716e8a8d01 h1:Qzk5C6cYglewc+UyGf6lc8Mj2UaPTHy/iF2De0/77CA= github.com/containers/libtrust v0.0.0-20230121012942-c1716e8a8d01/go.mod h1:9rfv8iPl1ZP7aqh9YA68wnZv2NUDbXdcdPHVz0pFbPY= github.com/containers/ocicrypt v1.0.1/go.mod h1:MeJDzk1RJHv89LjsH0Sp5KTY3ZYkjXO/C+bKAeWFIrc= @@ -268,9 +270,9 @@ github.com/cpuguy83/go-md2man/v2 v2.0.0/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsr github.com/cpuguy83/go-md2man/v2 v2.0.2/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o= github.com/creack/pty v1.1.7/go.mod h1:lj5s0c3V2DBrqTV7llrYr5NG6My20zk30Fl46Y7DoTY= github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= -github.com/creack/pty v1.1.11 h1:07n33Z8lZxZ2qwegKbObQohDhXDQxiMMz1NOUGYlesw= -github.com/cyberphone/json-canonicalization v0.0.0-20220623050100-57a0ce2678a7 h1:vU+EP9ZuFUCYE0NYLwTSob+3LNEJATzNfP/DC7SWGWI= -github.com/cyberphone/json-canonicalization v0.0.0-20220623050100-57a0ce2678a7/go.mod h1:uzvlm1mxhHkdfqitSA92i7Se+S9ksOn3a3qmv/kyOCw= +github.com/creack/pty v1.1.18 h1:n56/Zwd5o6whRC5PMGretI4IdRLlmBXYNjScPaBgsbY= +github.com/cyberphone/json-canonicalization v0.0.0-20230514072755-504adb8a8af1 h1:8Pq5UNTC+/UfvcOPKQGZoKCkeF+ZaKa4wJ9OS2gsQQM= +github.com/cyberphone/json-canonicalization v0.0.0-20230514072755-504adb8a8af1/go.mod h1:uzvlm1mxhHkdfqitSA92i7Se+S9ksOn3a3qmv/kyOCw= github.com/cyphar/filepath-securejoin v0.2.3 h1:YX6ebbZCZP7VkM3scTTokDgBL2TY741X51MTk3ycuNI= github.com/cyphar/filepath-securejoin v0.2.3/go.mod h1:aPGpWjXOXUn2NCNjFvBE6aRxGGx79pTxQpKOJNYHHl4= github.com/d2g/dhcp4 v0.0.0-20170904100407-a1d1b6c41b1c/go.mod 
h1:Ct2BUK8SB0YC1SMSibvLzxjeJLnrYEVLULFNiHY9YfQ= @@ -354,8 +356,8 @@ github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9 github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk= github.com/go-logr/logr v0.1.0/go.mod h1:ixOQHD9gLJUVQQ2ZOR7zLEifBX6tGkNJF4QyIY7sIas= github.com/go-logr/logr v0.2.0/go.mod h1:z6/tIYblkpsD+a4lm/fGIIU9mZ+XfAiaFtq7xTgseGU= -github.com/go-logr/logr v1.2.3 h1:2DntVwHkVopvECVRSlL5PSo9eG+cAkDCuckLubN+rq0= -github.com/go-logr/logr v1.2.3/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= +github.com/go-logr/logr v1.2.4 h1:g01GSCwiDw2xSZfjJ2/T9M+S6pFdcNtFYsp+Y43HYDQ= +github.com/go-logr/logr v1.2.4/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= github.com/go-openapi/analysis v0.21.2/go.mod h1:HZwRk4RRisyG8vx2Oe6aqeSQcoxRp47Xkp3+K6q+LdY= github.com/go-openapi/analysis v0.21.4 h1:ZDFLvSNxpDaomuCueM0BlSXxpANBlFYiBvr+GXrvIHc= github.com/go-openapi/analysis v0.21.4/go.mod h1:4zQ35W4neeZTqh3ol0rv/O8JBbka9QyAgQRPp9y3pfo= @@ -396,8 +398,8 @@ github.com/go-openapi/swag v0.19.2/go.mod h1:POnQmlKehdgb5mhVOsnJFsivZCEZ/vjK9gh github.com/go-openapi/swag v0.19.5/go.mod h1:POnQmlKehdgb5mhVOsnJFsivZCEZ/vjK9gh66Z9tfKk= github.com/go-openapi/swag v0.19.15/go.mod h1:QYRuS/SOXUCsnplDa677K7+DxSOj6IPNl/eQntq43wQ= github.com/go-openapi/swag v0.21.1/go.mod h1:QYRuS/SOXUCsnplDa677K7+DxSOj6IPNl/eQntq43wQ= -github.com/go-openapi/swag v0.22.3 h1:yMBqmnQ0gyZvEb/+KzuWZOXgllrXT4SADYbvDaXHv/g= -github.com/go-openapi/swag v0.22.3/go.mod h1:UzaqsxGiab7freDnrUUra0MwWfN/q7tE4j+VcZ0yl14= +github.com/go-openapi/swag v0.22.4 h1:QLMzNJnMGPRNDCbySlcj1x01tzU8/9LTTL9hZZZogBU= +github.com/go-openapi/swag v0.22.4/go.mod h1:UzaqsxGiab7freDnrUUra0MwWfN/q7tE4j+VcZ0yl14= github.com/go-openapi/validate v0.22.1 h1:G+c2ub6q47kfX1sOBLwIQwzBVt8qmOAARyo/9Fqs9NU= github.com/go-openapi/validate v0.22.1/go.mod h1:rjnrwK57VJ7A8xqfpAOEKRH8yQSGUriMu5/zuPSQ1hg= github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY= @@ -494,8 +496,8 @@ github.com/google/go-cmp v0.5.6/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/ github.com/google/go-cmp v0.5.9 h1:O2Tfq5qg4qc4AmwVlvv0oLiVAGB7enBSJ2x2DqQFi38= github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= github.com/google/go-containerregistry v0.5.1/go.mod h1:Ct15B4yir3PLOP5jsy0GNeYVaIZs/MK/Jz5any1wFW0= -github.com/google/go-containerregistry v0.14.0 h1:z58vMqHxuwvAsVwvKEkmVBz2TlgBgH5k6koEXBtlYkw= -github.com/google/go-containerregistry v0.14.0/go.mod h1:aiJ2fp/SXvkWgmYHioXnbMdlgB8eXiiYOY55gfN91Wk= +github.com/google/go-containerregistry v0.15.2 h1:MMkSh+tjSdnmJZO7ljvEqV1DjfekB6VUEAZgy3a+TQE= +github.com/google/go-containerregistry v0.15.2/go.mod h1:wWK+LnOv4jXMM23IT/F1wdYftGWGr47Is8CG+pmHK1Q= github.com/google/go-intervals v0.0.2 h1:FGrVEiUnTRKR8yE04qzXYaJMtnIYqobR5QbblK3ixcM= github.com/google/go-intervals v0.0.2/go.mod h1:MkaR3LNRfeKLPmqgJYs4E66z5InYjmCjbbr4TQlcT6Y= github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= @@ -507,8 +509,8 @@ github.com/google/pprof v0.0.0-20191218002539-d4f498aebedc/go.mod h1:ZgVRPoUq/hf github.com/google/pprof v0.0.0-20200212024743-f11f1df84d12/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= github.com/google/pprof v0.0.0-20200229191704-1ebb73c60ed3/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= github.com/google/pprof v0.0.0-20200430221834-fc25d7d30c6d/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= -github.com/google/pprof v0.0.0-20210407192527-94a9f03dee38 
h1:yAJXTCF9TqKcTiHJAE8dj7HMvPfh66eeA2JYW7eFpSE= -github.com/google/pprof v0.0.0-20210407192527-94a9f03dee38/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= +github.com/google/pprof v0.0.0-20230323073829-e72429f035bd h1:r8yyd+DJDmsUhGrRBxH5Pj7KeFK5l+Y3FsgT8keqKtk= +github.com/google/pprof v0.0.0-20230323073829-e72429f035bd/go.mod h1:79YE0hCXdHag9sBkw2o+N/YnZtTkXi0UT9Nnixa5eYk= github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI= github.com/google/uuid v1.0.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/google/uuid v1.1.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= @@ -554,14 +556,11 @@ github.com/honeycombio/beeline-go v1.10.0 h1:cUDe555oqvw8oD76BQJ8alk7FP0JZ/M/zXp github.com/honeycombio/libhoney-go v1.16.0 h1:kPpqoz6vbOzgp7jC6SR7SkNj7rua7rgxvznI6M3KdHc= github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU= github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= -github.com/ianlancetaylor/demangle v0.0.0-20200824232613-28f6c0f3b639/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= github.com/imdario/mergo v0.3.5/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA= github.com/imdario/mergo v0.3.8/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA= github.com/imdario/mergo v0.3.10/go.mod h1:jmQim1M+e3UYxmgPu/WyfjB3N3VflVyUjjjwH0dnCYA= github.com/imdario/mergo v0.3.11/go.mod h1:jmQim1M+e3UYxmgPu/WyfjB3N3VflVyUjjjwH0dnCYA= github.com/imdario/mergo v0.3.12/go.mod h1:jmQim1M+e3UYxmgPu/WyfjB3N3VflVyUjjjwH0dnCYA= -github.com/imdario/mergo v0.3.15 h1:M8XP7IuFNsqUx6VPK2P9OSmsYsI/YFaGil0uD21V3dM= -github.com/imdario/mergo v0.3.15/go.mod h1:WBLT9ZmE3lPoWsEzCh9LPo3TiwVN+ZKEjmz+hD27ysY= github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8= github.com/inconshreveable/mousetrap v1.1.0 h1:wN+x4NVGpMsO7ErUn/mUI3vEoE6Jt13X2s0bqwp9tc8= github.com/inconshreveable/mousetrap v1.1.0/go.mod h1:vpF70FUmC8bwa3OWnCshd2FqLfsEA9PFc4w1p2J65bw= @@ -676,8 +675,8 @@ github.com/moby/sys/mountinfo v0.6.2 h1:BzJjoreD5BMFNmD9Rus6gdd1pLuecOFPt8wC+Vyg github.com/moby/sys/mountinfo v0.6.2/go.mod h1:IJb6JQeOklcdMU9F5xQ8ZALD+CUr5VlGpwtX+VE0rpI= github.com/moby/sys/symlink v0.1.0/go.mod h1:GGDODQmbFOjFsXvfLVn3+ZRxkch54RkSiGqsZeMYowQ= github.com/moby/term v0.0.0-20200312100748-672ec06f55cd/go.mod h1:DdlQx2hp0Ss5/fLikoLlEeIYiATotOjgB//nb973jeo= -github.com/moby/term v0.0.0-20221120202655-abb19827d345 h1:J9c53/kxIH+2nTKBEfZYFMlhghtHpIHSXpm5VRGHSnU= -github.com/moby/term v0.0.0-20221120202655-abb19827d345/go.mod h1:15ce4BGCFxt7I5NQKT+HV0yEDxmf6fSysfEDiVo3zFM= +github.com/moby/term v0.5.0 h1:xt8Q1nalod/v7BqbG21f8mQPqH+xAaC9C3N3wfWbVP0= +github.com/moby/term v0.5.0/go.mod h1:8FzsFHVUBGZdbDsJw/ot+X+d5HLUbvklYLJ9uGfcI3Y= github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg= github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= @@ -712,8 +711,8 @@ github.com/onsi/ginkgo v1.10.3/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+ github.com/onsi/ginkgo v1.11.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= github.com/onsi/ginkgo v1.12.0/go.mod h1:oUhWkIvk5aDxtKvDDuw8gItl8pKl42LzjC9KZE0HfGg= github.com/onsi/ginkgo v1.12.1/go.mod 
h1:zj2OWP4+oCPe1qIXoGWkgMRwljMUYCdkwsT2108oapk= -github.com/onsi/ginkgo/v2 v2.9.2 h1:BA2GMJOtfGAfagzYtrAlufIP0lq6QERkFmHLMLPwFSU= -github.com/onsi/ginkgo/v2 v2.9.2/go.mod h1:WHcJJG2dIlcCqVfBAwUCrJxSPFb6v4azBwgxeMeDuts= +github.com/onsi/ginkgo/v2 v2.11.0 h1:WgqUCUt/lT6yXoQ8Wef0fsNn5cAuMK7+KT9UFRz2tcU= +github.com/onsi/ginkgo/v2 v2.11.0/go.mod h1:ZhrRA5XmEE3x3rhlzamx/JJvujdZoJ2uvgI7kR0iZvM= github.com/onsi/gomega v0.0.0-20151007035656-2152b45fa28a/go.mod h1:C1qb7wdrVGGVU+Z6iS04AVkA3Q65CEZX59MT0QO5uiA= github.com/onsi/gomega v0.0.0-20170829124025-dcabb60a477c/go.mod h1:C1qb7wdrVGGVU+Z6iS04AVkA3Q65CEZX59MT0QO5uiA= github.com/onsi/gomega v1.5.0/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY= @@ -721,8 +720,8 @@ github.com/onsi/gomega v1.7.0/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1Cpa github.com/onsi/gomega v1.7.1/go.mod h1:XdKZgCCFLUoM/7CFJVPcG8C1xQ1AJ0vpAezJrB7JYyY= github.com/onsi/gomega v1.9.0/go.mod h1:Ho0h+IUsWyvy1OpqCwxlQ/21gkhVunqlU8fDGcoTdcA= github.com/onsi/gomega v1.10.3/go.mod h1:V9xEwhxec5O8UDM77eCW8vLymOMltsqPVYWrpDsH8xc= -github.com/onsi/gomega v1.27.6 h1:ENqfyGeS5AX/rlXDd/ETokDz93u0YufY1Pgxuy/PvWE= -github.com/onsi/gomega v1.27.6/go.mod h1:PIQNjfQwkP3aQAH7lf7j87O/5FiNr+ZR8+ipb+qQlhg= +github.com/onsi/gomega v1.27.8 h1:gegWiwZjBsf2DgiSbf5hpokZ98JVDMcWkUiigk6/KXc= +github.com/onsi/gomega v1.27.8/go.mod h1:2J8vzI/s+2shY9XHRApDkdgPo1TKT7P2u6fXeJKFnNQ= github.com/opencontainers/go-digest v0.0.0-20170106003457-a6d0ee40d420/go.mod h1:cMLVZDEM3+U2I4VmLI6N8jQYUd2OVphdqWwCJHrFt2s= github.com/opencontainers/go-digest v0.0.0-20180430190053-c9281466c8b2/go.mod h1:cMLVZDEM3+U2I4VmLI6N8jQYUd2OVphdqWwCJHrFt2s= github.com/opencontainers/go-digest v1.0.0-rc1/go.mod h1:cMLVZDEM3+U2I4VmLI6N8jQYUd2OVphdqWwCJHrFt2s= @@ -732,8 +731,8 @@ github.com/opencontainers/go-digest v1.0.0/go.mod h1:0JzlMkj0TRzQZfJkVvzbP0HBR3I github.com/opencontainers/image-spec v1.0.0/go.mod h1:BtxoFyWECRxE4U/7sNtV5W15zMzWCbyJoFRP3s7yZA0= github.com/opencontainers/image-spec v1.0.1/go.mod h1:BtxoFyWECRxE4U/7sNtV5W15zMzWCbyJoFRP3s7yZA0= github.com/opencontainers/image-spec v1.0.2/go.mod h1:BtxoFyWECRxE4U/7sNtV5W15zMzWCbyJoFRP3s7yZA0= -github.com/opencontainers/image-spec v1.1.0-rc2.0.20221005185240-3a7f492d3f1b h1:YWuSjZCQAPM8UUBLkYUk1e+rZcvWHJmFb6i6rM44Xs8= -github.com/opencontainers/image-spec v1.1.0-rc2.0.20221005185240-3a7f492d3f1b/go.mod h1:3OVijpioIKYWTqjiG0zfF6wvoJ4fAXGbjdZuI2NgsRQ= +github.com/opencontainers/image-spec v1.1.0-rc3 h1:fzg1mXZFj8YdPeNkRXMg+zb88BFV0Ys52cJydRwBkb8= +github.com/opencontainers/image-spec v1.1.0-rc3/go.mod h1:X4pATf0uXsnn3g5aiGIsVnJBR4mxhKzfwmvK/B2NTm8= github.com/opencontainers/runc v1.1.1-0.20220617142545-8b9452f75cbc h1:qjkUzmFsOFbQyjObybk40mRida83j5IHRaKzLGdBbEU= github.com/opencontainers/runc v1.1.1-0.20220617142545-8b9452f75cbc/go.mod h1:wUOQGsiKae6VzA/UvlCK3cO+pHk8F2VQHlIoITEfMM8= github.com/opencontainers/runtime-spec v0.1.2-0.20190507144316-5b71a03e2700/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0= @@ -829,12 +828,12 @@ github.com/sebdah/goldie/v2 v2.5.3 h1:9ES/mNN+HNUbNWpVAlrzuZ7jE+Nrczbj8uFRjM7624 github.com/seccomp/libseccomp-golang v0.10.0/go.mod h1:JA8cRccbGaA1s33RQf7Y1+q9gHmZX1yB/z9WDN1C6fg= github.com/sergi/go-diff v1.2.0 h1:XU+rvMAioB0UC3q1MFrIQy4Vo5/4VsRDQQXHsEya6xQ= github.com/shurcooL/sanitized_anchor_name v1.0.0/go.mod h1:1NzhyTcUVG4SuEtjjoZeVRXNmyL/1OwPU0+IJeTBvfc= -github.com/sigstore/fulcio v1.2.0 h1:I4H764cDbryKXkPtasUvo8bcix/7xLvkxWYWNp+JtWI= -github.com/sigstore/fulcio v1.2.0/go.mod h1:FS7qpBvOEqs0uEh1+hJxzxtJistWN29ybLtAzFNUi0c= 
-github.com/sigstore/rekor v1.2.0 h1:ahlnoEY3zo8Vc+eZLPobamw6YfBTAbI0lthzUQd6qe4= -github.com/sigstore/rekor v1.2.0/go.mod h1:zcFO54qIg2G1/i0sE/nvmELUOng/n0MPjTszRYByVPo= -github.com/sigstore/sigstore v1.6.4 h1:jH4AzR7qlEH/EWzm+opSpxCfuUcjHL+LJPuQE7h40WE= -github.com/sigstore/sigstore v1.6.4/go.mod h1:pjR64lBxnjoSrAr+Ydye/FV73IfrgtoYlAI11a8xMfA= +github.com/sigstore/fulcio v1.3.1 h1:0ntW9VbQbt2JytoSs8BOGB84A65eeyvGSavWteYp29Y= +github.com/sigstore/fulcio v1.3.1/go.mod h1:/XfqazOec45ulJZpyL9sq+OsVQ8g2UOVoNVi7abFgqU= +github.com/sigstore/rekor v1.2.2-0.20230601122533-4c81ff246d12 h1:x/WnxasgR40qGY67IHwioakXLuhDxJ10vF8/INuOTiI= +github.com/sigstore/rekor v1.2.2-0.20230601122533-4c81ff246d12/go.mod h1:8c+a8Yo7r8gKuYbIaz+c3oOdw9iMXx+tMdOg2+b+2jQ= +github.com/sigstore/sigstore v1.7.1 h1:fCATemikcBK0cG4+NcM940MfoIgmioY1vC6E66hXxks= +github.com/sigstore/sigstore v1.7.1/go.mod h1:0PmMzfJP2Y9+lugD0wer4e7TihR5tM7NcIs3bQNk5xg= github.com/sirupsen/logrus v1.0.4-0.20170822132746-89742aefa4b2/go.mod h1:pMByvHTf9Beacp5x1UXfOR9xyW/9antXMhjMPG0dEzc= github.com/sirupsen/logrus v1.0.6/go.mod h1:pMByvHTf9Beacp5x1UXfOR9xyW/9antXMhjMPG0dEzc= github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo= @@ -886,8 +885,8 @@ github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/ github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= github.com/stretchr/testify v1.8.4 h1:CcVxjf3Q8PM0mHUKJCdn+eZZtm5yQwehR5yeSVQQcUk= -github.com/sylabs/sif/v2 v2.11.1 h1:d09yPukVa8b74wuy+QTA4Is3w8MH0UjO/xlWQUuFzpY= -github.com/sylabs/sif/v2 v2.11.1/go.mod h1:i4GcKLOaT4ertznbsuf11d/G9zLEfUZa7YhrFc5L6YQ= +github.com/sylabs/sif/v2 v2.11.5 h1:7ssPH3epSonsTrzbS1YxeJ9KuqAN7ISlSM61a7j/mQM= +github.com/sylabs/sif/v2 v2.11.5/go.mod h1:GBoZs9LU3e4yJH1dcZ3Akf/jsqYgy5SeguJQC+zd75Y= github.com/syndtr/gocapability v0.0.0-20170704070218-db04d3cc01c8/go.mod h1:hkRG7XYTFWNJGYcbNJQlaLq0fg1yr4J4t/NcTQtrfww= github.com/syndtr/gocapability v0.0.0-20180916011248-d98352740cb2/go.mod h1:hkRG7XYTFWNJGYcbNJQlaLq0fg1yr4J4t/NcTQtrfww= github.com/syndtr/gocapability v0.0.0-20200815063812-42c35b437635 h1:kdXcSzyDtseVEc4yCz2qF8ZrQvIDBJLl4S1c3GCXmoI= @@ -918,8 +917,8 @@ github.com/urfave/cli v1.22.12/go.mod h1:sSBEIC79qR6OvcmsD4U3KABeOTxDqQtdDnaFuUN github.com/vbatts/tar-split v0.11.2/go.mod h1:vV3ZuO2yWSVsz+pfFzDG/upWH1JhjOiEaWq6kXyQ3VI= github.com/vbatts/tar-split v0.11.3 h1:hLFqsOLQ1SsppQNTMpkpPXClLDfC2A3Zgy9OUU+RVck= github.com/vbatts/tar-split v0.11.3/go.mod h1:9QlHN18E+fEH7RdG+QAJJcuya3rqT7eXSTY7wGrAokY= -github.com/vbauerster/mpb/v8 v8.3.0 h1:xw2eMJ6v5NP8Rd7yOVzU6OqnRPrS1yWAoLTrWe7W4Nc= -github.com/vbauerster/mpb/v8 v8.3.0/go.mod h1:bngtYUAu25QGxcYYglsF6oyoHlC9Yhh582xF9LjfmL4= +github.com/vbauerster/mpb/v8 v8.4.0 h1:Jq2iNA7T6SydpMVOwaT+2OBWlXS9Th8KEvBqeu5eeTo= +github.com/vbauerster/mpb/v8 v8.4.0/go.mod h1:vjp3hSTuCtR+x98/+2vW3eZ8XzxvGoP8CPseHMhiPyc= github.com/vishvananda/netlink v0.0.0-20181108222139-023a6dafdcdf/go.mod h1:+SR5DhBJrl6ZM7CoCKvpw5BKroDKQ+PJqOg65H/2ktk= github.com/vishvananda/netlink v1.1.0/go.mod h1:cTgwzPIzzgDAYoQrMm0EdrjRUBkTqKYppBueQtXaqoE= github.com/vishvananda/netlink v1.1.1-0.20201029203352-d40f9887b852/go.mod h1:twkDnbuQxJYemMlGd4JFIcuhgX83tXhKS2B/PRMpOho= @@ -1007,8 +1006,8 @@ golang.org/x/exp v0.0.0-20191227195350-da58074b4299/go.mod h1:2RIsYlXP63K8oxa1u0 golang.org/x/exp v0.0.0-20200119233911-0405dc783f0a/go.mod 
h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= golang.org/x/exp v0.0.0-20200207192155-f17229e696bd/go.mod h1:J/WKrq2StrnmMY6+EHIKF9dgMWnmCNThgcyBT1FY9mM= golang.org/x/exp v0.0.0-20200224162631-6cc2880d07d6/go.mod h1:3jZMyOhIsHpP37uCMkUooju7aAi5cS1Q23tOzKc+0MU= -golang.org/x/exp v0.0.0-20230321023759-10a507213a29 h1:ooxPy7fPvB4kwsA2h+iBNHkAbp/4JxTSwCmvdjEYmug= -golang.org/x/exp v0.0.0-20230321023759-10a507213a29/go.mod h1:CxIveKay+FTh1D0yPZemJVgC/95VzuuOLq5Qi4xnoYc= +golang.org/x/exp v0.0.0-20230522175609-2e198f4a06a1 h1:k/i9J1pBpvlfR+9QsetwPyERsqu1GIbi967PQMq3Ivc= +golang.org/x/exp v0.0.0-20230522175609-2e198f4a06a1/go.mod h1:V1LtkGg67GoY2N1AnLN78QLrzxkLyJw7RJb1gzOOz9w= golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js= golang.org/x/image v0.0.0-20190802002840-cff245a6509b/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0= golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= @@ -1075,8 +1074,8 @@ golang.org/x/net v0.0.0-20210421230115-4e50805a0758/go.mod h1:72T/g9IO56b78aLF+1 golang.org/x/net v0.0.0-20210825183410-e898025ed96a/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.0.0-20211112202133-69e39bad7dc2/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= -golang.org/x/net v0.10.0 h1:X2//UzNDwYmtCLn7To6G58Wr6f5ahEAQgKNzv9Y951M= -golang.org/x/net v0.10.0/go.mod h1:0qNGK6F8kojg2nk9dLZ2mShWaEBan6FAoqfSigmmuDg= +golang.org/x/net v0.11.0 h1:Gi2tvZIJyBtO9SDr1q9h5hEQCp/4L2RQ+ar0qjx2oNU= +golang.org/x/net v0.11.0/go.mod h1:2L/ixqYpgIVXmeoSA/4Lu7BzTG4KIyPIryS4IsOd1oQ= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= @@ -1095,8 +1094,8 @@ golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJ golang.org/x/sync v0.0.0-20201207232520-09787c993a3a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.2.0 h1:PUR+T4wwASmuSTYdKjYHI5TD22Wy5ogLU5qZCOLxBrI= -golang.org/x/sync v0.2.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.3.0 h1:ftCYgMx6zT/asHUrPw8BLLscYtGznsLAnjq5RH9P66E= +golang.org/x/sync v0.3.0/go.mod h1:FU7BRWz2tNW+3quACPkgCx/L+uEAv1htQ0V83Z9Rj+Y= golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= @@ -1262,8 +1261,8 @@ golang.org/x/tools v0.0.0-20200916195026-c9a70fc28ce3/go.mod h1:z6u4i615ZeAfBE4X golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/tools v0.1.1/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc= -golang.org/x/tools v0.7.0 
h1:W4OVu8VVOaIO0yzWMNdepAulS7YfoS3Zabrm8DOXXU4= -golang.org/x/tools v0.7.0/go.mod h1:4pg6aUX35JBAogB10C9AtvVL+qowtN4pT3CGSQex14s= +golang.org/x/tools v0.9.3 h1:Gn1I8+64MsuTb/HpH+LmQtNas23LhUVr3rYZ0eKuaMM= +golang.org/x/tools v0.9.3/go.mod h1:owI94Op576fPu3cIGQeHs3joujW/2Oc6MtlxbF5dfNc= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= diff --git a/vendor/github.com/imdario/mergo/.deepsource.toml b/vendor/dario.cat/mergo/.deepsource.toml similarity index 72% rename from vendor/github.com/imdario/mergo/.deepsource.toml rename to vendor/dario.cat/mergo/.deepsource.toml index 8a0681af8..a8bc979e0 100644 --- a/vendor/github.com/imdario/mergo/.deepsource.toml +++ b/vendor/dario.cat/mergo/.deepsource.toml @@ -9,4 +9,4 @@ name = "go" enabled = true [analyzers.meta] - import_path = "github.com/imdario/mergo" \ No newline at end of file + import_path = "dario.cat/mergo" \ No newline at end of file diff --git a/vendor/github.com/imdario/mergo/.gitignore b/vendor/dario.cat/mergo/.gitignore similarity index 100% rename from vendor/github.com/imdario/mergo/.gitignore rename to vendor/dario.cat/mergo/.gitignore diff --git a/vendor/github.com/imdario/mergo/.travis.yml b/vendor/dario.cat/mergo/.travis.yml similarity index 100% rename from vendor/github.com/imdario/mergo/.travis.yml rename to vendor/dario.cat/mergo/.travis.yml diff --git a/vendor/github.com/imdario/mergo/CODE_OF_CONDUCT.md b/vendor/dario.cat/mergo/CODE_OF_CONDUCT.md similarity index 100% rename from vendor/github.com/imdario/mergo/CODE_OF_CONDUCT.md rename to vendor/dario.cat/mergo/CODE_OF_CONDUCT.md diff --git a/vendor/github.com/imdario/mergo/CONTRIBUTING.md b/vendor/dario.cat/mergo/CONTRIBUTING.md similarity index 100% rename from vendor/github.com/imdario/mergo/CONTRIBUTING.md rename to vendor/dario.cat/mergo/CONTRIBUTING.md diff --git a/vendor/github.com/imdario/mergo/LICENSE b/vendor/dario.cat/mergo/LICENSE similarity index 100% rename from vendor/github.com/imdario/mergo/LICENSE rename to vendor/dario.cat/mergo/LICENSE diff --git a/vendor/github.com/imdario/mergo/README.md b/vendor/dario.cat/mergo/README.md similarity index 89% rename from vendor/github.com/imdario/mergo/README.md rename to vendor/dario.cat/mergo/README.md index 4f0287498..7d0cf9f32 100644 --- a/vendor/github.com/imdario/mergo/README.md +++ b/vendor/dario.cat/mergo/README.md @@ -1,17 +1,20 @@ # Mergo -[![GoDoc][3]][4] [![GitHub release][5]][6] [![GoCard][7]][8] -[![Build Status][1]][2] -[![Coverage Status][9]][10] +[![Test status][1]][2] +[![OpenSSF Scorecard][21]][22] +[![OpenSSF Best Practices][19]][20] +[![Coverage status][9]][10] [![Sourcegraph][11]][12] -[![FOSSA Status][13]][14] +[![FOSSA status][13]][14] + +[![GoDoc][3]][4] [![Become my sponsor][15]][16] [![Tidelift][17]][18] -[1]: https://travis-ci.org/imdario/mergo.png -[2]: https://travis-ci.org/imdario/mergo +[1]: https://github.com/imdario/mergo/workflows/tests/badge.svg?branch=master +[2]: https://github.com/imdario/mergo/actions/workflows/tests.yml [3]: https://godoc.org/github.com/imdario/mergo?status.svg [4]: https://godoc.org/github.com/imdario/mergo [5]: https://img.shields.io/github/release/imdario/mergo.svg @@ -28,6 +31,10 @@ [16]: https://github.com/sponsors/imdario [17]: 
https://tidelift.com/badges/package/go/github.com%2Fimdario%2Fmergo [18]: https://tidelift.com/subscription/pkg/go-github.com-imdario-mergo +[19]: https://bestpractices.coreinfrastructure.org/projects/7177/badge +[20]: https://bestpractices.coreinfrastructure.org/projects/7177 +[21]: https://api.securityscorecards.dev/projects/github.com/imdario/mergo/badge +[22]: https://api.securityscorecards.dev/projects/github.com/imdario/mergo A helper to merge structs and maps in Golang. Useful for configuration default values, avoiding messy if-statements. @@ -39,13 +46,19 @@ Also a lovely [comune](http://en.wikipedia.org/wiki/Mergo) (municipality) in the It is ready for production use. [It is used in several projects by Docker, Google, The Linux Foundation, VMWare, Shopify, Microsoft, etc](https://github.com/imdario/mergo#mergo-in-the-wild). -### Important note +### Important notes + +#### 1.0.0 + +In [1.0.0](//github.com/imdario/mergo/releases/tag/1.0.0) Mergo moves to a vanity URL `dario.cat/mergo`. + +#### 0.3.9 Please keep in mind that a problematic PR broke [0.3.9](//github.com/imdario/mergo/releases/tag/0.3.9). I reverted it in [0.3.10](//github.com/imdario/mergo/releases/tag/0.3.10), and I consider it stable but not bug-free. Also, this version adds support for go modules. Keep in mind that in [0.3.2](//github.com/imdario/mergo/releases/tag/0.3.2), Mergo changed `Merge()`and `Map()` signatures to support [transformers](#transformers). I added an optional/variadic argument so that it won't break the existing code. -If you were using Mergo before April 6th, 2015, please check your project works as intended after updating your local copy with ```go get -u github.com/imdario/mergo```. I apologize for any issue caused by its previous behavior and any future bug that Mergo could cause in existing projects after the change (release 0.2.0). +If you were using Mergo before April 6th, 2015, please check your project works as intended after updating your local copy with ```go get -u dario.cat/mergo```. I apologize for any issue caused by its previous behavior and any future bug that Mergo could cause in existing projects after the change (release 0.2.0). ### Donations @@ -103,11 +116,11 @@ If Mergo is useful to you, consider buying me a coffee, a beer, or making a mont ## Install - go get github.com/imdario/mergo + go get dario.cat/mergo // use in your .go code import ( - "github.com/imdario/mergo" + "dario.cat/mergo" ) ## Usage @@ -145,7 +158,7 @@ package main import ( "fmt" - "github.com/imdario/mergo" + "dario.cat/mergo" ) type Foo struct { @@ -181,9 +194,9 @@ package main import ( "fmt" - "github.com/imdario/mergo" - "reflect" - "time" + "dario.cat/mergo" + "reflect" + "time" ) type timeTransformer struct { @@ -232,5 +245,4 @@ Written by [Dario Castañé](http://dario.im). [BSD 3-Clause](http://opensource.org/licenses/BSD-3-Clause) license, as [Go language](http://golang.org/LICENSE). 
- [![FOSSA Status](https://app.fossa.io/api/projects/git%2Bgithub.com%2Fimdario%2Fmergo.svg?type=large)](https://app.fossa.io/projects/git%2Bgithub.com%2Fimdario%2Fmergo?ref=badge_large) diff --git a/vendor/github.com/imdario/mergo/SECURITY.md b/vendor/dario.cat/mergo/SECURITY.md similarity index 100% rename from vendor/github.com/imdario/mergo/SECURITY.md rename to vendor/dario.cat/mergo/SECURITY.md diff --git a/vendor/github.com/imdario/mergo/doc.go b/vendor/dario.cat/mergo/doc.go similarity index 88% rename from vendor/github.com/imdario/mergo/doc.go rename to vendor/dario.cat/mergo/doc.go index fcd985f99..7d96ec054 100644 --- a/vendor/github.com/imdario/mergo/doc.go +++ b/vendor/dario.cat/mergo/doc.go @@ -8,30 +8,36 @@ A helper to merge structs and maps in Golang. Useful for configuration default v Mergo merges same-type structs and maps by setting default values in zero-value fields. Mergo won't merge unexported (private) fields. It will do recursively any exported one. It also won't merge structs inside maps (because they are not addressable using Go reflection). -Status +# Status It is ready for production use. It is used in several projects by Docker, Google, The Linux Foundation, VMWare, Shopify, etc. -Important note +# Important notes + +1.0.0 + +In 1.0.0 Mergo moves to a vanity URL `dario.cat/mergo`. + +0.3.9 Please keep in mind that a problematic PR broke 0.3.9. We reverted it in 0.3.10. We consider 0.3.10 as stable but not bug-free. . Also, this version adds suppot for go modules. Keep in mind that in 0.3.2, Mergo changed Merge() and Map() signatures to support transformers. We added an optional/variadic argument so that it won't break the existing code. -If you were using Mergo before April 6th, 2015, please check your project works as intended after updating your local copy with go get -u github.com/imdario/mergo. I apologize for any issue caused by its previous behavior and any future bug that Mergo could cause in existing projects after the change (release 0.2.0). +If you were using Mergo before April 6th, 2015, please check your project works as intended after updating your local copy with go get -u dario.cat/mergo. I apologize for any issue caused by its previous behavior and any future bug that Mergo could cause in existing projects after the change (release 0.2.0). -Install +# Install Do your usual installation procedure: - go get github.com/imdario/mergo + go get dario.cat/mergo - // use in your .go code - import ( - "github.com/imdario/mergo" - ) + // use in your .go code + import ( + "dario.cat/mergo" + ) -Usage +# Usage You can only merge same-type structs with exported fields initialized as zero value of their type and same-types maps. Mergo won't merge unexported (private) fields but will do recursively any exported one. It won't merge empty structs value as they are zero values too. Also, maps will be merged recursively except for structs inside maps (because they are not addressable using Go reflection). @@ -59,7 +65,7 @@ Here is a nice example: import ( "fmt" - "github.com/imdario/mergo" + "dario.cat/mergo" ) type Foo struct { @@ -81,7 +87,7 @@ Here is a nice example: // {two 2} } -Transformers +# Transformers Transformers allow to merge specific types differently than in the default behavior. In other words, now you can customize how some types are merged. For example, time.Time is a struct; it doesn't have zero value but IsZero can return true because it has fields with zero value. How can we merge a non-zero time.Time? 
@@ -89,9 +95,9 @@ Transformers allow to merge specific types differently than in the default behav import ( "fmt" - "github.com/imdario/mergo" - "reflect" - "time" + "dario.cat/mergo" + "reflect" + "time" ) type timeTransformer struct { @@ -127,17 +133,16 @@ Transformers allow to merge specific types differently than in the default behav // { 2018-01-12 01:15:00 +0000 UTC m=+0.000000001 } } -Contact me +# Contact me If I can help you, you have an idea or you are using Mergo in your projects, don't hesitate to drop me a line (or a pull request): https://twitter.com/im_dario -About +# About Written by Dario Castañé: https://da.rio.hn -License +# License BSD 3-Clause license, as Go language. - */ package mergo diff --git a/vendor/github.com/imdario/mergo/map.go b/vendor/dario.cat/mergo/map.go similarity index 100% rename from vendor/github.com/imdario/mergo/map.go rename to vendor/dario.cat/mergo/map.go diff --git a/vendor/github.com/imdario/mergo/merge.go b/vendor/dario.cat/mergo/merge.go similarity index 100% rename from vendor/github.com/imdario/mergo/merge.go rename to vendor/dario.cat/mergo/merge.go diff --git a/vendor/github.com/imdario/mergo/mergo.go b/vendor/dario.cat/mergo/mergo.go similarity index 100% rename from vendor/github.com/imdario/mergo/mergo.go rename to vendor/dario.cat/mergo/mergo.go diff --git a/vendor/github.com/Azure/go-ansiterm/SECURITY.md b/vendor/github.com/Azure/go-ansiterm/SECURITY.md new file mode 100644 index 000000000..e138ec5d6 --- /dev/null +++ b/vendor/github.com/Azure/go-ansiterm/SECURITY.md @@ -0,0 +1,41 @@ + + +## Security + +Microsoft takes the security of our software products and services seriously, which includes all source code repositories managed through our GitHub organizations, which include [Microsoft](https://github.com/microsoft), [Azure](https://github.com/Azure), [DotNet](https://github.com/dotnet), [AspNet](https://github.com/aspnet), [Xamarin](https://github.com/xamarin), and [our GitHub organizations](https://opensource.microsoft.com/). + +If you believe you have found a security vulnerability in any Microsoft-owned repository that meets [Microsoft's definition of a security vulnerability](https://aka.ms/opensource/security/definition), please report it to us as described below. + +## Reporting Security Issues + +**Please do not report security vulnerabilities through public GitHub issues.** + +Instead, please report them to the Microsoft Security Response Center (MSRC) at [https://msrc.microsoft.com/create-report](https://aka.ms/opensource/security/create-report). + +If you prefer to submit without logging in, send email to [secure@microsoft.com](mailto:secure@microsoft.com). If possible, encrypt your message with our PGP key; please download it from the [Microsoft Security Response Center PGP Key page](https://aka.ms/opensource/security/pgpkey). + +You should receive a response within 24 hours. If for some reason you do not, please follow up via email to ensure we received your original message. Additional information can be found at [microsoft.com/msrc](https://aka.ms/opensource/security/msrc). + +Please include the requested information listed below (as much as you can provide) to help us better understand the nature and scope of the possible issue: + + * Type of issue (e.g. buffer overflow, SQL injection, cross-site scripting, etc.) 
+ * Full paths of source file(s) related to the manifestation of the issue + * The location of the affected source code (tag/branch/commit or direct URL) + * Any special configuration required to reproduce the issue + * Step-by-step instructions to reproduce the issue + * Proof-of-concept or exploit code (if possible) + * Impact of the issue, including how an attacker might exploit the issue + +This information will help us triage your report more quickly. + +If you are reporting for a bug bounty, more complete reports can contribute to a higher bounty award. Please visit our [Microsoft Bug Bounty Program](https://aka.ms/opensource/security/bounty) page for more details about our active programs. + +## Preferred Languages + +We prefer all communications to be in English. + +## Policy + +Microsoft follows the principle of [Coordinated Vulnerability Disclosure](https://aka.ms/opensource/security/cvd). + + diff --git a/vendor/github.com/Microsoft/hcsshim/Makefile b/vendor/github.com/Microsoft/hcsshim/Makefile index 742c76d84..d8eb30b86 100644 --- a/vendor/github.com/Microsoft/hcsshim/Makefile +++ b/vendor/github.com/Microsoft/hcsshim/Makefile @@ -94,23 +94,9 @@ out/delta.tar.gz: bin/init bin/vsockexec bin/cmd/gcs bin/cmd/gcstools bin/cmd/ho tar -zcf $@ -C rootfs . rm -rf rootfs --include deps/cmd/gcs.gomake --include deps/cmd/gcstools.gomake --include deps/cmd/hooks/wait-paths.gomake --include deps/cmd/tar2ext4.gomake --include deps/internal/tools/snp-report.gomake - -# Implicit rule for includes that define Go targets. -%.gomake: $(SRCROOT)/Makefile +bin/cmd/gcs bin/cmd/gcstools bin/cmd/hooks/wait-paths bin/cmd/tar2ext4 bin/internal/tools/snp-report: @mkdir -p $(dir $@) - @/bin/echo $(@:deps/%.gomake=bin/%): $(SRCROOT)/hack/gomakedeps.sh > $@.new - @/bin/echo -e '\t@mkdir -p $$(dir $$@) $(dir $@)' >> $@.new - @/bin/echo -e '\t$$(GO_BUILD) -o $$@.new $$(SRCROOT)/$$(@:bin/%=%)' >> $@.new - @/bin/echo -e '\tGO="$(GO)" $$(SRCROOT)/hack/gomakedeps.sh $$@ $$(SRCROOT)/$$(@:bin/%=%) $$(GO_FLAGS) $$(GO_FLAGS_EXTRA) > $(@:%.gomake=%.godeps).new' >> $@.new - @/bin/echo -e '\tmv $(@:%.gomake=%.godeps).new $(@:%.gomake=%.godeps)' >> $@.new - @/bin/echo -e '\tmv $$@.new $$@' >> $@.new - @/bin/echo -e '-include $(@:%.gomake=%.godeps)' >> $@.new - mv $@.new $@ + GOOS=linux $(GO_BUILD) -o $@ $(SRCROOT)/$(@:bin/%=%) bin/vsockexec: vsockexec/vsockexec.o vsockexec/vsock.o @mkdir -p bin diff --git a/vendor/github.com/Microsoft/hcsshim/internal/log/format.go b/vendor/github.com/Microsoft/hcsshim/internal/log/format.go new file mode 100644 index 000000000..4b6500333 --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/internal/log/format.go @@ -0,0 +1,85 @@ +package log + +import ( + "bytes" + "context" + "encoding/json" + "fmt" + "net" + "reflect" + "time" + + "github.com/containerd/containerd/log" +) + +const TimeFormat = log.RFC3339NanoFixed + +func FormatTime(t time.Time) string { + return t.Format(TimeFormat) +} + +// DurationFormat formats a [time.Duration] log entry. +// +// A nil value signals an error with the formatting. +type DurationFormat func(time.Duration) interface{} + +func DurationFormatString(d time.Duration) interface{} { return d.String() } +func DurationFormatSeconds(d time.Duration) interface{} { return d.Seconds() } +func DurationFormatMilliseconds(d time.Duration) interface{} { return d.Milliseconds() } + +// FormatIO formats net.Conn and other types that have an `Addr()` or `Name()`. +// +// See FormatEnabled for more information. 
+func FormatIO(ctx context.Context, v interface{}) string { + m := make(map[string]string) + m["type"] = reflect.TypeOf(v).String() + + switch t := v.(type) { + case net.Conn: + m["localAddress"] = formatAddr(t.LocalAddr()) + m["remoteAddress"] = formatAddr(t.RemoteAddr()) + case interface{ Addr() net.Addr }: + m["address"] = formatAddr(t.Addr()) + default: + return Format(ctx, t) + } + + return Format(ctx, m) +} + +func formatAddr(a net.Addr) string { + return a.Network() + "://" + a.String() +} + +// Format formats an object into a JSON string, without any indendtation or +// HTML escapes. +// Context is used to output a log waring if the conversion fails. +// +// This is intended primarily for `trace.StringAttribute()` +func Format(ctx context.Context, v interface{}) string { + b, err := encode(v) + if err != nil { + G(ctx).WithError(err).Warning("could not format value") + return "" + } + + return string(b) +} + +func encode(v interface{}) ([]byte, error) { + return encodeBuffer(&bytes.Buffer{}, v) +} + +func encodeBuffer(buf *bytes.Buffer, v interface{}) ([]byte, error) { + enc := json.NewEncoder(buf) + enc.SetEscapeHTML(false) + enc.SetIndent("", "") + + if err := enc.Encode(v); err != nil { + err = fmt.Errorf("could not marshall %T to JSON for logging: %w", v, err) + return nil, err + } + + // encoder.Encode appends a newline to the end + return bytes.TrimSpace(buf.Bytes()), nil +} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/log/hook.go b/vendor/github.com/Microsoft/hcsshim/internal/log/hook.go index 8f8940592..94c6d0918 100644 --- a/vendor/github.com/Microsoft/hcsshim/internal/log/hook.go +++ b/vendor/github.com/Microsoft/hcsshim/internal/log/hook.go @@ -1,23 +1,58 @@ package log import ( + "bytes" + "reflect" + "time" + "github.com/Microsoft/hcsshim/internal/logfields" + "github.com/containerd/containerd/log" "github.com/sirupsen/logrus" "go.opencensus.io/trace" ) -// Hook serves to intercept and format `logrus.Entry`s before they are passed -// to the ETW hook. +const nullString = "null" + +// Hook intercepts and formats a [logrus.Entry] before it logged. // -// The containerd shim discards the (formatted) logrus output, and outputs only via ETW. -// The Linux GCS outputs logrus entries over stdout, which is consumed by the shim and -// then re-output via the ETW hook. -type Hook struct{} +// The shim either outputs the logs through an ETW hook, discarding the (formatted) output +// or logs output to a pipe for logging binaries to consume. +// The Linux GCS outputs logrus entries over stdout, which is then consumed and re-output +// by the shim. +type Hook struct { + // EncodeAsJSON formats structs, maps, arrays, slices, and [bytes.Buffer] as JSON. + // Variables of [bytes.Buffer] will be converted to []byte. + // + // Default is false. + EncodeAsJSON bool + + // FormatTime specifies the format for [time.Time] variables. + // An empty string disables formatting. + // When disabled, the fall back will the JSON encoding, if enabled. + // + // Default is [github.com/containerd/containerd/log.RFC3339NanoFixed]. + TimeFormat string + + // Duration format converts a [time.Duration] fields to an appropriate encoding. + // nil disables formatting. + // When disabled, the fall back will the JSON encoding, if enabled. + // + // Default is [DurationFormatString], which appends a duration unit after the value. 
+ DurationFormat DurationFormat + + // AddSpanContext adds [logfields.TraceID] and [logfields.SpanID] fields to + // the entry from the span context stored in [logrus.Entry.Context], if it exists. + AddSpanContext bool +} var _ logrus.Hook = &Hook{} func NewHook() *Hook { - return &Hook{} + return &Hook{ + TimeFormat: log.RFC3339NanoFixed, + DurationFormat: DurationFormatString, + AddSpanContext: true, + } } func (h *Hook) Levels() []logrus.Level { @@ -25,14 +60,108 @@ func (h *Hook) Levels() []logrus.Level { } func (h *Hook) Fire(e *logrus.Entry) (err error) { + // JSON encode, if necessary, then add span information + h.encode(e) h.addSpanContext(e) return nil } +// encode loops through all the fields in the [logrus.Entry] and encodes them according to +// the settings in [Hook]. +// If [Hook.TimeFormat] is non-empty, it will be passed to [time.Time.Format] for +// fields of type [time.Time]. +// +// If [Hook.EncodeAsJSON] is true, then fields that are not numeric, boolean, strings, or +// errors will be encoded via a [json.Marshal] (with HTML escaping disabled). +// Chanel- and function-typed fields, as well as unsafe pointers are left alone and not encoded. +// +// If [Hook.TimeFormat] and [Hook.DurationFormat] are empty and [Hook.EncodeAsJSON] is false, +// then this is a no-op. +func (h *Hook) encode(e *logrus.Entry) { + d := e.Data + + formatTime := h.TimeFormat != "" + formatDuration := h.DurationFormat != nil + if !(h.EncodeAsJSON || formatTime || formatDuration) { + return + } + + for k, v := range d { + // encode types with dedicated formatting options first + + if vv, ok := v.(time.Time); formatTime && ok { + d[k] = vv.Format(h.TimeFormat) + continue + } + + if vv, ok := v.(time.Duration); formatDuration && ok { + d[k] = h.DurationFormat(vv) + continue + } + + // general case JSON encoding + + if !h.EncodeAsJSON { + continue + } + + switch vv := v.(type) { + // built in types + // "json" marshals errors as "{}", so leave alone here + case bool, string, error, uintptr, + int8, int16, int32, int64, int, + uint8, uint32, uint64, uint, + float32, float64: + continue + + // Rather than setting d[k] = vv.String(), JSON encode []byte value, since it + // may be a binary payload and not representable as a string. + // `case bytes.Buffer,*bytes.Buffer:` resolves `vv` to `interface{}`, + // so cannot use `vv.Bytes`. + // Could move to below the `reflect.Indirect()` call below, but + // that would require additional typematching and dereferencing. + // Easier to keep these duplicate branches here. + case bytes.Buffer: + v = vv.Bytes() + case *bytes.Buffer: + v = vv.Bytes() + } + + // dereference pointer or interface variables + rv := reflect.Indirect(reflect.ValueOf(v)) + // check if `v` is a null pointer + if !rv.IsValid() { + d[k] = nullString + continue + } + + switch rv.Kind() { + case reflect.Map, reflect.Struct, reflect.Array, reflect.Slice: + default: + // Bool, [U]?Int*, Float*, Complex*, Uintptr, String: encoded as normal + // Chan, Func: not supported by json + // Interface, Pointer: dereferenced above + // UnsafePointer: not supported by json, not safe to de-reference; leave alone + continue + } + + b, err := encode(v) + if err != nil { + // Errors are written to stderr (ie, to `panic.log`) and stops the remaining + // hooks (ie, exporting to ETW) from firing. So add encoding errors to + // the entry data to be written out, but keep on processing. 
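For orientation, a minimal usage sketch of the new Hook options (not part of the vendored change; the log package is internal to hcsshim, so this only builds from within that module):

package main

import (
	"time"

	"github.com/Microsoft/hcsshim/internal/log"
	"github.com/sirupsen/logrus"
)

func main() {
	h := log.NewHook() // defaults: RFC3339NanoFixed timestamps, string durations, span context added
	h.EncodeAsJSON = true
	h.DurationFormat = log.DurationFormatMilliseconds
	logrus.AddHook(h)

	// With the settings above, "elapsed" reaches the formatter as 1500
	// (milliseconds) instead of the default "1.5s" string, and struct or
	// map fields would be JSON-encoded by Fire.
	logrus.WithField("elapsed", 1500*time.Millisecond).Info("copy finished")
}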
+ d[k+"-"+logrus.ErrorKey] = err.Error() + // keep the original `v` as the value, + continue + } + d[k] = string(b) + } +} + func (h *Hook) addSpanContext(e *logrus.Entry) { ctx := e.Context - if ctx == nil { + if !h.AddSpanContext || ctx == nil { return } span := trace.FromContext(ctx) diff --git a/vendor/github.com/Microsoft/hcsshim/internal/log/scrub.go b/vendor/github.com/Microsoft/hcsshim/internal/log/scrub.go index d51e0fd89..d1ef15096 100644 --- a/vendor/github.com/Microsoft/hcsshim/internal/log/scrub.go +++ b/vendor/github.com/Microsoft/hcsshim/internal/log/scrub.go @@ -4,7 +4,6 @@ import ( "bytes" "encoding/json" "errors" - "strings" "sync/atomic" hcsschema "github.com/Microsoft/hcsshim/internal/hcs/schema2" @@ -56,11 +55,11 @@ func ScrubProcessParameters(s string) (string, error) { } pp.Environment = map[string]string{_scrubbedReplacement: _scrubbedReplacement} - buf := bytes.NewBuffer(b[:0]) - if err := encode(buf, pp); err != nil { + b, err := encodeBuffer(bytes.NewBuffer(b[:0]), pp) + if err != nil { return "", err } - return strings.TrimSpace(buf.String()), nil + return string(b), nil } // ScrubBridgeCreate scrubs requests sent over the bridge of type @@ -150,21 +149,12 @@ func scrubBytes(b []byte, scrub scrubberFunc) ([]byte, error) { return nil, err } - buf := &bytes.Buffer{} - if err := encode(buf, m); err != nil { + b, err := encode(m) + if err != nil { return nil, err } - return bytes.TrimSpace(buf.Bytes()), nil -} - -func encode(buf *bytes.Buffer, v interface{}) error { - enc := json.NewEncoder(buf) - enc.SetEscapeHTML(false) - if err := enc.Encode(v); err != nil { - return err - } - return nil + return b, nil } func isRequestBase(m genMap) bool { diff --git a/vendor/github.com/Microsoft/hcsshim/internal/oc/errors.go b/vendor/github.com/Microsoft/hcsshim/internal/oc/errors.go new file mode 100644 index 000000000..71df25b8d --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/internal/oc/errors.go @@ -0,0 +1,69 @@ +package oc + +import ( + "errors" + "io" + "net" + "os" + + "github.com/containerd/containerd/errdefs" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/status" +) + +// todo: break import cycle with "internal/hcs/errors.go" and reference errors defined there +// todo: add errors defined in "internal/guest/gcserror" (Hresult does not implement error) + +func toStatusCode(err error) codes.Code { + // checks if err implements GRPCStatus() *"google.golang.org/grpc/status".Status, + // wraps an error defined in "github.com/containerd/containerd/errdefs", or is a + // context timeout or cancelled error + if s, ok := status.FromError(errdefs.ToGRPC(err)); ok { + return s.Code() + } + + switch { + // case isAny(err): + // return codes.Cancelled + case isAny(err, os.ErrInvalid): + return codes.InvalidArgument + case isAny(err, os.ErrDeadlineExceeded): + return codes.DeadlineExceeded + case isAny(err, os.ErrNotExist): + return codes.NotFound + case isAny(err, os.ErrExist): + return codes.AlreadyExists + case isAny(err, os.ErrPermission): + return codes.PermissionDenied + // case isAny(err): + // return codes.ResourceExhausted + case isAny(err, os.ErrClosed, net.ErrClosed, io.ErrClosedPipe, io.ErrShortBuffer): + return codes.FailedPrecondition + // case isAny(err): + // return codes.Aborted + // case isAny(err): + // return codes.OutOfRange + // case isAny(err): + // return codes.Unimplemented + case isAny(err, io.ErrNoProgress): + return codes.Internal + // case isAny(err): + // return codes.Unavailable + case isAny(err, io.ErrShortWrite, 
io.ErrUnexpectedEOF): + return codes.DataLoss + // case isAny(err): + // return codes.Unauthenticated + default: + return codes.Unknown + } +} + +// isAny returns true if errors.Is is true for any of the provided errors, errs. +func isAny(err error, errs ...error) bool { + for _, e := range errs { + if errors.Is(err, e) { + return true + } + } + return false +} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/oc/exporter.go b/vendor/github.com/Microsoft/hcsshim/internal/oc/exporter.go index f428bdaf7..28f8f43a9 100644 --- a/vendor/github.com/Microsoft/hcsshim/internal/oc/exporter.go +++ b/vendor/github.com/Microsoft/hcsshim/internal/oc/exporter.go @@ -3,19 +3,26 @@ package oc import ( "github.com/sirupsen/logrus" "go.opencensus.io/trace" + "google.golang.org/grpc/codes" + + "github.com/Microsoft/hcsshim/internal/log" + "github.com/Microsoft/hcsshim/internal/logfields" ) -var _ = (trace.Exporter)(&LogrusExporter{}) +const spanMessage = "Span" + +var _errorCodeKey = logrus.ErrorKey + "Code" // LogrusExporter is an OpenCensus `trace.Exporter` that exports // `trace.SpanData` to logrus output. -type LogrusExporter struct { -} +type LogrusExporter struct{} + +var _ trace.Exporter = &LogrusExporter{} // ExportSpan exports `s` based on the the following rules: // -// 1. All output will contain `s.Attributes`, `s.TraceID`, `s.SpanID`, -// `s.ParentSpanID` for correlation +// 1. All output will contain `s.Attributes`, `s.SpanKind`, `s.TraceID`, +// `s.SpanID`, and `s.ParentSpanID` for correlation // // 2. Any calls to .Annotate will not be supported. // @@ -23,21 +30,57 @@ type LogrusExporter struct { // `s.Status.Code != 0` in which case it will be written at `logrus.ErrorLevel` // providing `s.Status.Message` as the error value. func (le *LogrusExporter) ExportSpan(s *trace.SpanData) { - // Combine all span annotations with traceID, spanID, parentSpanID - baseEntry := logrus.WithFields(logrus.Fields(s.Attributes)) - baseEntry.Data["traceID"] = s.TraceID.String() - baseEntry.Data["spanID"] = s.SpanID.String() - baseEntry.Data["parentSpanID"] = s.ParentSpanID.String() - baseEntry.Data["startTime"] = s.StartTime - baseEntry.Data["endTime"] = s.EndTime - baseEntry.Data["duration"] = s.EndTime.Sub(s.StartTime).String() - baseEntry.Data["name"] = s.Name - baseEntry.Time = s.StartTime + if s.DroppedAnnotationCount > 0 { + logrus.WithFields(logrus.Fields{ + "name": s.Name, + logfields.TraceID: s.TraceID.String(), + logfields.SpanID: s.SpanID.String(), + "dropped": s.DroppedAttributeCount, + "maxAttributes": len(s.Attributes), + }).Warning("span had dropped attributes") + } + + entry := log.L.Dup() + // Combine all span annotations with span data (eg, trace ID, span ID, parent span ID, + // error, status code) + // (OC) Span attributes are guaranteed to be strings, bools, or int64s, so we can + // can skip overhead in entry.WithFields() and add them directly to entry.Data. 
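A small usage sketch (again assuming code that lives inside the hcsshim module, since internal/oc is not importable from outside): registering the exporter and letting the reworked status mapping pick a gRPC code for a common error:

package main

import (
	"context"
	"os"

	"github.com/Microsoft/hcsshim/internal/oc"
	"go.opencensus.io/trace"
)

func main() {
	trace.RegisterExporter(&oc.LogrusExporter{})
	trace.ApplyConfig(trace.Config{DefaultSampler: oc.DefaultSampler})

	_, span := trace.StartSpan(context.Background(), "demo")
	_, err := os.Open("/does/not/exist")
	// With the span.go change further down, the status becomes codes.NotFound
	// rather than the old generic StatusCodeUnknown; ExportSpan then logs the
	// span at ErrorLevel with error and errorCode fields attached.
	oc.SetSpanStatus(span, err)
	span.End()
}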
+ // Preallocate ahead of time, since we should add, at most, 10 additional entries + data := make(logrus.Fields, len(entry.Data)+len(s.Attributes)+10) + + // Default log entry may have prexisting/application-wide data + for k, v := range entry.Data { + data[k] = v + } + for k, v := range s.Attributes { + data[k] = v + } + + data[logfields.Name] = s.Name + data[logfields.TraceID] = s.TraceID.String() + data[logfields.SpanID] = s.SpanID.String() + data[logfields.ParentSpanID] = s.ParentSpanID.String() + data[logfields.StartTime] = s.StartTime + data[logfields.EndTime] = s.EndTime + data[logfields.Duration] = s.EndTime.Sub(s.StartTime) + if sk := spanKindToString(s.SpanKind); sk != "" { + data["spanKind"] = sk + } level := logrus.InfoLevel if s.Status.Code != 0 { level = logrus.ErrorLevel - baseEntry.Data[logrus.ErrorKey] = s.Status.Message + + // don't overwrite an existing "error" or "errorCode" attributes + if _, ok := data[logrus.ErrorKey]; !ok { + data[logrus.ErrorKey] = s.Status.Message + } + if _, ok := data[_errorCodeKey]; !ok { + data[_errorCodeKey] = codes.Code(s.Status.Code).String() + } } - baseEntry.Log(level, "Span") + + entry.Data = data + entry.Time = s.StartTime + entry.Log(level, spanMessage) } diff --git a/vendor/github.com/Microsoft/hcsshim/internal/oc/span.go b/vendor/github.com/Microsoft/hcsshim/internal/oc/span.go index 0e2b7e9bf..726078432 100644 --- a/vendor/github.com/Microsoft/hcsshim/internal/oc/span.go +++ b/vendor/github.com/Microsoft/hcsshim/internal/oc/span.go @@ -14,8 +14,7 @@ var DefaultSampler = trace.AlwaysSample() func SetSpanStatus(span *trace.Span, err error) { status := trace.Status{} if err != nil { - // TODO: JTERRY75 - Handle errors in a non-generic way - status.Code = trace.StatusCodeUnknown + status.Code = int32(toStatusCode(err)) status.Message = err.Error() } span.SetStatus(status) @@ -46,3 +45,14 @@ func update(ctx context.Context, s *trace.Span) (context.Context, *trace.Span) { var WithServerSpanKind = trace.WithSpanKind(trace.SpanKindServer) var WithClientSpanKind = trace.WithSpanKind(trace.SpanKindClient) + +func spanKindToString(sk int) string { + switch sk { + case trace.SpanKindClient: + return "client" + case trace.SpanKindServer: + return "server" + default: + return "" + } +} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/security/grantvmgroupaccess.go b/vendor/github.com/Microsoft/hcsshim/internal/security/grantvmgroupaccess.go index bfcc15769..7dfa1e594 100644 --- a/vendor/github.com/Microsoft/hcsshim/internal/security/grantvmgroupaccess.go +++ b/vendor/github.com/Microsoft/hcsshim/internal/security/grantvmgroupaccess.go @@ -23,20 +23,14 @@ type ( ) type explicitAccess struct { - //nolint:structcheck accessPermissions accessMask - //nolint:structcheck - accessMode accessMode - //nolint:structcheck - inheritance inheritMode - //nolint:structcheck - trustee trustee + accessMode accessMode + inheritance inheritMode + trustee trustee } type trustee struct { - //nolint:unused,structcheck - multipleTrustee *trustee - //nolint:unused,structcheck + multipleTrustee *trustee multipleTrusteeOperation int32 trusteeForm trusteeForm trusteeType trusteeType diff --git a/vendor/github.com/containers/common/libimage/filters.go b/vendor/github.com/containers/common/libimage/filters.go index 441011edd..995f89c78 100644 --- a/vendor/github.com/containers/common/libimage/filters.go +++ b/vendor/github.com/containers/common/libimage/filters.go @@ -178,7 +178,7 @@ func (r *Runtime) compileImageFilters(ctx context.Context, options *ListImagesOp 
filter = filterManifest(ctx, manifest) case "reference": - filter = filterReferences(value) + filter = filterReferences(r, value) case "until": until, err := r.until(value) @@ -268,8 +268,15 @@ func filterManifest(ctx context.Context, value bool) filterFunc { } // filterReferences creates a reference filter for matching the specified value. -func filterReferences(value string) filterFunc { +func filterReferences(r *Runtime, value string) filterFunc { + lookedUp, _, _ := r.LookupImage(value, nil) return func(img *Image) (bool, error) { + if lookedUp != nil { + if lookedUp.ID() == img.ID() { + return true, nil + } + } + refs, err := img.NamesReferences() if err != nil { return false, err @@ -306,6 +313,7 @@ func filterReferences(value string) filterFunc { } } } + return false, nil } } @@ -386,10 +394,12 @@ func filterID(value string) filterFunc { } } -// filterDigest creates an digest filter for matching the specified value. +// filterDigest creates a digest filter for matching the specified value. func filterDigest(value string) filterFunc { + // TODO: return an error if value is not a digest + // if _, err := digest.Parse(value); err != nil {...} return func(img *Image) (bool, error) { - return string(img.Digest()) == value, nil + return img.hasDigest(value), nil } } diff --git a/vendor/github.com/containers/common/libimage/history.go b/vendor/github.com/containers/common/libimage/history.go index 46252df10..ad989b528 100644 --- a/vendor/github.com/containers/common/libimage/history.go +++ b/vendor/github.com/containers/common/libimage/history.go @@ -51,7 +51,6 @@ func (i *Image) History(ctx context.Context) ([]ImageHistory, error) { } if layer != nil { - history.Tags = layer.Names if !ociImage.History[x].EmptyLayer { history.Size = layer.UncompressedSize } @@ -64,8 +63,13 @@ func (i *Image) History(ctx context.Context) ([]ImageHistory, error) { history.ID = id usedIDs[id] = true } + for i := range node.images { + history.Tags = append(history.Tags, node.images[i].Names()...) + } } - if layer.Parent != "" && !ociImage.History[x].EmptyLayer { + if layer.Parent == "" { + layer = nil + } else if !ociImage.History[x].EmptyLayer { layer, err = i.runtime.store.Layer(layer.Parent) if err != nil { return nil, err diff --git a/vendor/github.com/containers/common/libimage/image.go b/vendor/github.com/containers/common/libimage/image.go index 9090f035a..dc47030c5 100644 --- a/vendor/github.com/containers/common/libimage/image.go +++ b/vendor/github.com/containers/common/libimage/image.go @@ -144,6 +144,9 @@ func (i *Image) ID() string { // possibly many digests that we have stored for the image, so many // applications are better off using the entire list returned by Digests(). func (i *Image) Digest() digest.Digest { + // TODO: we return the image digest or the one of the manifest list + // which can lead to issues depending on the callers' assumptions. + // Hence, deprecate in favor of Digest_s_. return i.storageImage.Digest } @@ -154,6 +157,18 @@ func (i *Image) Digests() []digest.Digest { return i.storageImage.Digests } +// hasDigest returns whether the specified value matches any digest of the +// image. +func (i *Image) hasDigest(value string) bool { + // TODO: change the argument to a typed digest.Digest + for _, d := range i.Digests() { + if string(d) == value { + return true + } + } + return false +} + // IsReadOnly returns whether the image is set read only. 
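A sketch of how the reworked reference filter is reached through the public API; the Filters field name on ListImagesOptions is assumed here, as it is not shown in this hunk:

package sketch

import (
	"context"
	"fmt"

	"github.com/containers/common/libimage"
)

func listFedora(rt *libimage.Runtime) error {
	images, err := rt.ListImages(context.Background(), nil, &libimage.ListImagesOptions{
		// "reference=fedora" now also matches whatever LookupImage("fedora")
		// resolves to, in addition to the existing repo-tag pattern matching.
		Filters: []string{"reference=fedora"},
	})
	if err != nil {
		return err
	}
	for _, img := range images {
		fmt.Println(img.ID(), img.Names())
	}
	return nil
}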
func (i *Image) IsReadOnly() bool { return i.storageImage.ReadOnly @@ -402,7 +417,7 @@ func (i *Image) removeRecursive(ctx context.Context, rmMap map[string]*RemoveIma // have a closer look at the errors. On top, image removal should be // tolerant toward corrupted images. handleError := func(err error) error { - if errors.Is(err, storage.ErrImageUnknown) || errors.Is(err, storage.ErrNotAnImage) || errors.Is(err, storage.ErrLayerUnknown) { + if ErrorIsImageUnknown(err) { // The image or layers of the image may already have been removed // in which case we consider the image to be removed. return nil @@ -424,14 +439,12 @@ func (i *Image) removeRecursive(ctx context.Context, rmMap map[string]*RemoveIma numNames := len(i.Names()) // NOTE: the `numNames == 1` check is not only a performance - // optimization but also preserves exiting Podman/Docker behaviour. + // optimization but also preserves existing Podman/Docker behaviour. // If image "foo" is used by a container and has only this tag/name, // an `rmi foo` will not untag "foo" but instead attempt to remove the // entire image. If there's a container using "foo", we should get an // error. - if referencedBy == "" || numNames == 1 { - // DO NOTHING, the image will be removed - } else { + if !(referencedBy == "" || numNames == 1) { byID := strings.HasPrefix(i.ID(), referencedBy) byDigest := strings.HasPrefix(referencedBy, "sha256:") if !options.Force { @@ -658,6 +671,8 @@ func (i *Image) NamedTaggedRepoTags() ([]reference.NamedTagged, error) { // NamedRepoTags returns the repotags associated with the image as a // slice of reference.Named. func (i *Image) NamedRepoTags() ([]reference.Named, error) { + // FIXME: the NamedRepoTags name is a bit misleading as it can return + // repo@digest values if that’s how an image was pulled. var repoTags []reference.Named for _, name := range i.Names() { parsed, err := reference.Parse(name) @@ -671,32 +686,37 @@ func (i *Image) NamedRepoTags() ([]reference.Named, error) { return repoTags, nil } -// inRepoTags looks for the specified name/tag pair in the image's repo tags. -func (i *Image) inRepoTags(namedTagged reference.NamedTagged) (reference.Named, error) { +// inRepoTags looks for the specified name/tag in the image's repo tags. If +// `ignoreTag` is set, only the repo must match and the tag is ignored. 
+func (i *Image) inRepoTags(namedTagged reference.NamedTagged, ignoreTag bool) (reference.Named, error) { repoTags, err := i.NamedRepoTags() if err != nil { return nil, err } - pairs, err := ToNameTagPairs(repoTags) - if err != nil { - return nil, err - } - name := namedTagged.Name() tag := namedTagged.Tag() - for _, pair := range pairs { - if tag != pair.Tag { - continue + for _, r := range repoTags { + if !ignoreTag { + var repoTag string + tagged, isTagged := r.(reference.NamedTagged) + if isTagged { + repoTag = tagged.Tag() + } + if !isTagged || tag != repoTag { + continue + } } - if !strings.HasSuffix(pair.Name, name) { + + repoName := r.Name() + if !strings.HasSuffix(repoName, name) { continue } - if len(pair.Name) == len(name) { // full match - return pair.named, nil + if len(repoName) == len(name) { // full match + return r, nil } - if pair.Name[len(pair.Name)-len(name)-1] == '/' { // matches at repo - return pair.named, nil + if repoName[len(repoName)-len(name)-1] == '/' { // matches at repo + return r, nil } } @@ -737,7 +757,7 @@ func (i *Image) RepoDigests() ([]string, error) { // Mount the image with the specified mount options and label, both of which // are directly passed down to the containers storage. Returns the fully // evaluated path to the mount point. -func (i *Image) Mount(ctx context.Context, mountOptions []string, mountLabel string) (string, error) { +func (i *Image) Mount(_ context.Context, mountOptions []string, mountLabel string) (string, error) { if i.runtime.eventChannel != nil { defer i.runtime.writeEvent(&Event{ID: i.ID(), Name: "", Time: time.Now(), Type: EventTypeImageMount}) } @@ -808,6 +828,9 @@ type HasDifferentDigestOptions struct { // containers-auth.json(5) file to use when authenticating against // container registries. AuthFilePath string + // Allow contacting registries over HTTP, or HTTPS with failed TLS + // verification. Note that this does not affect other TLS connections. 
+ InsecureSkipTLSVerify types.OptionalBool } // HasDifferentDigest returns true if the image specified by `remoteRef` has a @@ -833,8 +856,15 @@ func (i *Image) HasDifferentDigest(ctx context.Context, remoteRef types.ImageRef sys.VariantChoice = inspectInfo.Variant } - if options != nil && options.AuthFilePath != "" { - sys.AuthFilePath = options.AuthFilePath + if options != nil { + if options.AuthFilePath != "" { + sys.AuthFilePath = options.AuthFilePath + } + if options.InsecureSkipTLSVerify != types.OptionalBoolUndefined { + sys.DockerInsecureSkipTLSVerify = options.InsecureSkipTLSVerify + sys.OCIInsecureSkipTLSVerify = options.InsecureSkipTLSVerify == types.OptionalBoolTrue + sys.DockerDaemonInsecureSkipTLSVerify = options.InsecureSkipTLSVerify == types.OptionalBoolTrue + } } return i.hasDifferentDigestWithSystemContext(ctx, remoteRef, sys) diff --git a/vendor/github.com/containers/common/libimage/import.go b/vendor/github.com/containers/common/libimage/import.go index f557db626..6e739f93f 100644 --- a/vendor/github.com/containers/common/libimage/import.go +++ b/vendor/github.com/containers/common/libimage/import.go @@ -54,11 +54,13 @@ func (r *Runtime) Import(ctx context.Context, path string, options *ImportOption } config := v1.Image{ - Config: ic, - History: history, - OS: options.OS, - Architecture: options.Arch, - Variant: options.Variant, + Config: ic, + History: history, + Platform: v1.Platform{ + OS: options.OS, + Architecture: options.Arch, + Variant: options.Variant, + }, } u, err := url.ParseRequestURI(path) diff --git a/vendor/github.com/containers/common/libimage/layer_tree.go b/vendor/github.com/containers/common/libimage/layer_tree.go index a7d2f8c58..e6b012f90 100644 --- a/vendor/github.com/containers/common/libimage/layer_tree.go +++ b/vendor/github.com/containers/common/libimage/layer_tree.go @@ -2,8 +2,10 @@ package libimage import ( "context" + "errors" "github.com/containers/storage" + storageTypes "github.com/containers/storage/types" ociv1 "github.com/opencontainers/image-spec/specs-go/v1" "github.com/sirupsen/logrus" ) @@ -30,7 +32,19 @@ func (t *layerTree) node(layerID string) *layerNode { return node } +// ErrorIsImageUnknown returns true if the specified error indicates that an +// image is unknown or has been partially removed (e.g., a missing layer). +func ErrorIsImageUnknown(err error) bool { + return errors.Is(err, storage.ErrImageUnknown) || + errors.Is(err, storageTypes.ErrLayerUnknown) || + errors.Is(err, storageTypes.ErrSizeUnknown) || + errors.Is(err, storage.ErrNotAnImage) +} + // toOCI returns an OCI image for the specified image. +// +// WARNING: callers are responsible for handling cases where the target image +// has been (partially) removed and can use `ErrorIsImageUnknown` to detect it. func (t *layerTree) toOCI(ctx context.Context, i *Image) (*ociv1.Image, error) { var err error oci, exists := t.ociCache[i.ID()] @@ -155,6 +169,9 @@ func (t *layerTree) children(ctx context.Context, parent *Image, all bool) ([]*I parentID := parent.ID() parentOCI, err := t.toOCI(ctx, parent) if err != nil { + if ErrorIsImageUnknown(err) { + return nil, nil + } return nil, err } @@ -165,6 +182,9 @@ func (t *layerTree) children(ctx context.Context, parent *Image, all bool) ([]*I } childOCI, err := t.toOCI(ctx, child) if err != nil { + if ErrorIsImageUnknown(err) { + return false, nil + } return false, err } // History check. 
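A short sketch of how a caller might use the newly exported ErrorIsImageUnknown helper to tolerate images that disappear mid-inspection:

package sketch

import (
	"context"

	"github.com/containers/common/libimage"
)

func historyTags(ctx context.Context, img *libimage.Image) ([]string, error) {
	history, err := img.History(ctx)
	if err != nil {
		if libimage.ErrorIsImageUnknown(err) {
			// The image (or one of its layers) was removed while we were
			// inspecting it, e.g. by a concurrent prune; treat it as gone.
			return nil, nil
		}
		return nil, err
	}
	var tags []string
	for _, h := range history {
		tags = append(tags, h.Tags...)
	}
	return tags, nil
}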
@@ -255,6 +275,9 @@ func (t *layerTree) parent(ctx context.Context, child *Image) (*Image, error) { childID := child.ID() childOCI, err := t.toOCI(ctx, child) if err != nil { + if ErrorIsImageUnknown(err) { + return nil, nil + } return nil, err } @@ -268,6 +291,9 @@ func (t *layerTree) parent(ctx context.Context, child *Image) (*Image, error) { } emptyOCI, err := t.toOCI(ctx, empty) if err != nil { + if ErrorIsImageUnknown(err) { + return nil, nil + } return nil, err } // History check. @@ -300,6 +326,9 @@ func (t *layerTree) parent(ctx context.Context, child *Image) (*Image, error) { } parentOCI, err := t.toOCI(ctx, parent) if err != nil { + if ErrorIsImageUnknown(err) { + return nil, nil + } return nil, err } // History check. diff --git a/vendor/github.com/containers/common/libimage/manifest_list.go b/vendor/github.com/containers/common/libimage/manifest_list.go index bf1738a33..0223fb355 100644 --- a/vendor/github.com/containers/common/libimage/manifest_list.go +++ b/vendor/github.com/containers/common/libimage/manifest_list.go @@ -217,6 +217,11 @@ func (i *Image) getManifestList() (manifests.List, error) { // image index (OCI). This information may be critical to make certain // execution paths more robust (e.g., suppress certain errors). func (i *Image) IsManifestList(ctx context.Context) (bool, error) { + // FIXME: due to `ImageDigestBigDataKey` we'll always check the + // _last-written_ manifest which is causing issues for multi-arch image + // pulls. + // + // See https://github.com/containers/common/pull/1505#discussion_r1242677279. ref, err := i.StorageReference() if err != nil { return false, err @@ -388,10 +393,7 @@ func (m *ManifestList) AnnotateInstance(d digest.Digest, options *ManifestListAn } // Write the changes to disk. - if err := m.saveAndReload(); err != nil { - return err - } - return nil + return m.saveAndReload() } // RemoveInstance removes the instance specified by `d` from the manifest list. @@ -402,10 +404,7 @@ func (m *ManifestList) RemoveInstance(d digest.Digest) error { } // Write the changes to disk. - if err := m.saveAndReload(); err != nil { - return err - } - return nil + return m.saveAndReload() } // ManifestListPushOptions allow for customizing pushing a manifest list. diff --git a/vendor/github.com/containers/common/libimage/manifests/manifests.go b/vendor/github.com/containers/common/libimage/manifests/manifests.go index 0f3c1d711..7a51b8423 100644 --- a/vendor/github.com/containers/common/libimage/manifests/manifests.go +++ b/vendor/github.com/containers/common/libimage/manifests/manifests.go @@ -423,7 +423,7 @@ func (l *list) Remove(instanceDigest digest.Digest) error { // then use that list's SaveToImage() method to save a modified version of the // list to that image record use this lock to avoid accidentally wiping out // changes that another process is also attempting to make. 
-func LockerForImage(store storage.Store, image string) (lockfile.Locker, error) { +func LockerForImage(store storage.Store, image string) (lockfile.Locker, error) { // nolint:staticcheck img, err := store.Image(image) if err != nil { return nil, fmt.Errorf("locating image %q for locating lock: %w", image, err) diff --git a/vendor/github.com/containers/common/libimage/normalize.go b/vendor/github.com/containers/common/libimage/normalize.go index bb3cdbc7c..9619b1a0d 100644 --- a/vendor/github.com/containers/common/libimage/normalize.go +++ b/vendor/github.com/containers/common/libimage/normalize.go @@ -100,22 +100,22 @@ func ToNameTagPairs(repoTags []reference.Named) ([]NameTagPair, error) { // normalizeTaggedDigestedString strips the tag off the specified string iff it // is tagged and digested. Note that the tag is entirely ignored to match // Docker behavior. -func normalizeTaggedDigestedString(s string) (string, error) { +func normalizeTaggedDigestedString(s string) (string, reference.Named, error) { // Note that the input string is not expected to be parseable, so we // return it verbatim in error cases. ref, err := reference.Parse(s) if err != nil { - return "", err + return "", nil, err } named, ok := ref.(reference.Named) if !ok { - return s, nil + return s, nil, nil } named, err = normalizeTaggedDigestedNamed(named) if err != nil { - return "", err + return "", nil, err } - return named.String(), nil + return named.String(), named, nil } // normalizeTaggedDigestedNamed strips the tag off the specified named diff --git a/vendor/github.com/containers/common/libimage/pull.go b/vendor/github.com/containers/common/libimage/pull.go index 4a0f5970d..296d003a8 100644 --- a/vendor/github.com/containers/common/libimage/pull.go +++ b/vendor/github.com/containers/common/libimage/pull.go @@ -86,7 +86,7 @@ func (r *Runtime) Pull(ctx context.Context, name string, pullPolicy config.PullP // Docker compat: strip off the tag iff name is tagged and digested // (e.g., fedora:latest@sha256...). In that case, the tag is stripped // off and entirely ignored. The digest is the sole source of truth. - normalizedName, normalizeError := normalizeTaggedDigestedString(name) + normalizedName, _, normalizeError := normalizeTaggedDigestedString(name) if normalizeError != nil { return nil, normalizeError } @@ -442,8 +442,17 @@ func (r *Runtime) imagesIDsForManifest(manifestBytes []byte, sys *types.SystemCo if err != nil { return nil, fmt.Errorf("listing images by manifest digest: %w", err) } - results := make([]string, 0, len(images)) + + // If you have additionStores defined and the same image stored in + // both storage and additional store, it can be output twice. 
+ // Fixes github.com/containers/podman/issues/18647 + results := []string{} + imageMap := map[string]bool{} for _, image := range images { + if imageMap[image.ID] { + continue + } + imageMap[image.ID] = true results = append(results, image.ID) } if len(results) == 0 { diff --git a/vendor/github.com/containers/common/libimage/runtime.go b/vendor/github.com/containers/common/libimage/runtime.go index 95da83bb9..7707d2e3b 100644 --- a/vendor/github.com/containers/common/libimage/runtime.go +++ b/vendor/github.com/containers/common/libimage/runtime.go @@ -16,6 +16,7 @@ import ( "github.com/containers/storage" deepcopy "github.com/jinzhu/copier" jsoniter "github.com/json-iterator/go" + "github.com/opencontainers/go-digest" "github.com/sirupsen/logrus" ) @@ -239,7 +240,7 @@ func (r *Runtime) LookupImage(name string, options *LookupImageOptions) (*Image, // Docker compat: strip off the tag iff name is tagged and digested // (e.g., fedora:latest@sha256...). In that case, the tag is stripped // off and entirely ignored. The digest is the sole source of truth. - normalizedName, err := normalizeTaggedDigestedString(name) + normalizedName, possiblyUnqualifiedNamedReference, err := normalizeTaggedDigestedString(name) if err != nil { return nil, "", err } @@ -259,7 +260,7 @@ func (r *Runtime) LookupImage(name string, options *LookupImageOptions) (*Image, // If the name clearly refers to a local image, try to look it up. if byFullID || byDigest { - img, err := r.lookupImageInLocalStorage(originalName, name, options) + img, err := r.lookupImageInLocalStorage(originalName, name, nil, options) if err != nil { return nil, "", err } @@ -297,7 +298,7 @@ func (r *Runtime) LookupImage(name string, options *LookupImageOptions) (*Image, } for _, candidate := range candidates { - img, err := r.lookupImageInLocalStorage(name, candidate.String(), options) + img, err := r.lookupImageInLocalStorage(name, candidate.String(), candidate, options) if err != nil { return nil, "", err } @@ -308,7 +309,7 @@ func (r *Runtime) LookupImage(name string, options *LookupImageOptions) (*Image, // The specified name may refer to a short ID. Note that this *must* // happen after the short-name expansion as done above. - img, err := r.lookupImageInLocalStorage(name, name, options) + img, err := r.lookupImageInLocalStorage(name, name, nil, options) if err != nil { return nil, "", err } @@ -316,21 +317,51 @@ func (r *Runtime) LookupImage(name string, options *LookupImageOptions) (*Image, return img, name, err } - return r.lookupImageInDigestsAndRepoTags(name, options) + return r.lookupImageInDigestsAndRepoTags(name, possiblyUnqualifiedNamedReference, options) } // lookupImageInLocalStorage looks up the specified candidate for name in the // storage and checks whether it's matching the system context. -func (r *Runtime) lookupImageInLocalStorage(name, candidate string, options *LookupImageOptions) (*Image, error) { +func (r *Runtime) lookupImageInLocalStorage(name, candidate string, namedCandidate reference.Named, options *LookupImageOptions) (*Image, error) { logrus.Debugf("Trying %q ...", candidate) - img, err := r.store.Image(candidate) - if err != nil && !errors.Is(err, storage.ErrImageUnknown) { - return nil, err - } - if img == nil { - return nil, nil + + var err error + var img *storage.Image + var ref types.ImageReference + + // FIXME: the lookup logic for manifest lists needs improvement. + // See https://github.com/containers/common/pull/1505#discussion_r1242677279 + // for details. 
+ + // For images pulled by tag, Image.Names does not currently contain a + // repo@digest value, so such an input would not match directly in + // c/storage. + if namedCandidate != nil { + namedCandidate = reference.TagNameOnly(namedCandidate) + ref, err = storageTransport.Transport.NewStoreReference(r.store, namedCandidate, "") + if err != nil { + return nil, err + } + img, err = storageTransport.Transport.GetStoreImage(r.store, ref) + if err != nil { + if errors.Is(err, storage.ErrImageUnknown) { + return nil, nil + } + return nil, err + } + // NOTE: we must reparse the reference another time below since + // an ordinary image may have resolved into a per-platform image + // without any regard to options.{Architecture,OS,Variant}. + } else { + img, err = r.store.Image(candidate) + if err != nil { + if errors.Is(err, storage.ErrImageUnknown) { + return nil, nil + } + return nil, err + } } - ref, err := storageTransport.Transport.ParseStoreReference(r.store, img.ID) + ref, err = storageTransport.Transport.ParseStoreReference(r.store, img.ID) if err != nil { return nil, err } @@ -417,76 +448,71 @@ func (r *Runtime) lookupImageInLocalStorage(name, candidate string, options *Loo // lookupImageInDigestsAndRepoTags attempts to match name against any image in // the local containers storage. If name is digested, it will be compared // against image digests. Otherwise, it will be looked up in the repo tags. -func (r *Runtime) lookupImageInDigestsAndRepoTags(name string, options *LookupImageOptions) (*Image, string, error) { - // Until now, we've tried very hard to find an image but now it is time - // for limbo. If the image includes a digest that we couldn't detect - // verbatim in the storage, we must have a look at all digests of all - // images. Those may change over time (e.g., via manifest lists). - // Both Podman and Buildah want us to do that dance. - allImages, err := r.ListImages(context.Background(), nil, nil) - if err != nil { - return nil, "", err - } +func (r *Runtime) lookupImageInDigestsAndRepoTags(name string, possiblyUnqualifiedNamedReference reference.Named, options *LookupImageOptions) (*Image, string, error) { + originalName := name // we may change name below - ref, err := reference.Parse(name) // Warning! This is not ParseNormalizedNamed - if err != nil { - return nil, "", err - } - named, isNamed := ref.(reference.Named) - if !isNamed { - return nil, "", fmt.Errorf("%s: %w", name, storage.ErrImageUnknown) + if possiblyUnqualifiedNamedReference == nil { + return nil, "", fmt.Errorf("%s: %w", originalName, storage.ErrImageUnknown) } - digested, isDigested := named.(reference.Digested) + // In case of a digested reference, we strip off the digest and require + // any image matching the repo/tag to also match the specified digest. + var requiredDigest digest.Digest + digested, isDigested := possiblyUnqualifiedNamedReference.(reference.Digested) if isDigested { - logrus.Debug("Looking for image with matching recorded digests") - digest := digested.Digest() - for _, image := range allImages { - for _, d := range image.Digests() { - if d != digest { - continue - } - // Also make sure that the matching image fits all criteria (e.g., manifest list). 
- if _, err := r.lookupImageInLocalStorage(name, image.ID(), options); err != nil { - return nil, "", err - } - return image, name, nil - - } - } - return nil, "", fmt.Errorf("%s: %w", name, storage.ErrImageUnknown) + requiredDigest = digested.Digest() + possiblyUnqualifiedNamedReference = reference.TrimNamed(possiblyUnqualifiedNamedReference) + name = possiblyUnqualifiedNamedReference.String() } if !shortnames.IsShortName(name) { - return nil, "", fmt.Errorf("%s: %w", name, storage.ErrImageUnknown) + return nil, "", fmt.Errorf("%s: %w", originalName, storage.ErrImageUnknown) } - named = reference.TagNameOnly(named) // Make sure to add ":latest" if needed - namedTagged, isNammedTagged := named.(reference.NamedTagged) - if !isNammedTagged { - // NOTE: this should never happen since we already know it's - // not a digested reference. - return nil, "", fmt.Errorf("%s: %w (could not cast to tagged)", name, storage.ErrImageUnknown) + // Docker compat: make sure to add the "latest" tag if needed. The tag + // will be ignored if we're looking for a digest match. + possiblyUnqualifiedNamedReference = reference.TagNameOnly(possiblyUnqualifiedNamedReference) + namedTagged, isNamedTagged := possiblyUnqualifiedNamedReference.(reference.NamedTagged) + if !isNamedTagged { + // NOTE: this should never happen since we already stripped off + // the digest. + return nil, "", fmt.Errorf("%s: %w (could not cast to tagged)", originalName, storage.ErrImageUnknown) + } + + allImages, err := r.ListImages(context.Background(), nil, nil) + if err != nil { + return nil, "", err } for _, image := range allImages { - named, err := image.inRepoTags(namedTagged) + named, err := image.inRepoTags(namedTagged, isDigested) if err != nil { return nil, "", err } if named == nil { continue } - img, err := r.lookupImageInLocalStorage(name, named.String(), options) + img, err := r.lookupImageInLocalStorage(name, named.String(), named, options) if err != nil { return nil, "", err } if img != nil { - return img, named.String(), err + if isDigested { + if !img.hasDigest(requiredDigest.String()) { + continue + } + named = reference.TrimNamed(named) + canonical, err := reference.WithDigest(named, requiredDigest) + if err != nil { + return nil, "", fmt.Errorf("building canonical reference with digest %q and matched %q: %w", requiredDigest.String(), named.String(), err) + } + return img, canonical.String(), nil + } + return img, named.String(), nil } } - return nil, "", fmt.Errorf("%s: %w", name, storage.ErrImageUnknown) + return nil, "", fmt.Errorf("%s: %w", originalName, storage.ErrImageUnknown) } // ResolveName resolves the specified name. 
If the name resolves to a local diff --git a/vendor/github.com/containers/common/libnetwork/types/const.go b/vendor/github.com/containers/common/libnetwork/types/const.go index e367f9ad3..83103ef6e 100644 --- a/vendor/github.com/containers/common/libnetwork/types/const.go +++ b/vendor/github.com/containers/common/libnetwork/types/const.go @@ -36,11 +36,13 @@ const ( IPVLANModeL3s = "l3s" // valid network options - VLANOption = "vlan" - MTUOption = "mtu" - ModeOption = "mode" - IsolateOption = "isolate" - MetricOption = "metric" + VLANOption = "vlan" + MTUOption = "mtu" + ModeOption = "mode" + IsolateOption = "isolate" + MetricOption = "metric" + NoDefaultRoute = "no_default_route" + BclimOption = "bclim" ) type NetworkBackend string diff --git a/vendor/github.com/containers/common/libnetwork/types/define.go b/vendor/github.com/containers/common/libnetwork/types/define.go index f84221458..6e91ccda9 100644 --- a/vendor/github.com/containers/common/libnetwork/types/define.go +++ b/vendor/github.com/containers/common/libnetwork/types/define.go @@ -3,7 +3,8 @@ package types import ( "errors" "fmt" - "regexp" + + "github.com/containers/storage/pkg/regexp" ) var ( @@ -19,7 +20,11 @@ var ( // NameRegex is a regular expression to validate names. // This must NOT be changed. - NameRegex = regexp.MustCompile("^[a-zA-Z0-9][a-zA-Z0-9_.-]*$") + NameRegex = regexp.Delayed("^[a-zA-Z0-9][a-zA-Z0-9_.-]*$") // RegexError is thrown in presence of an invalid name. RegexError = fmt.Errorf("names must match [a-zA-Z0-9][a-zA-Z0-9_.-]*: %w", ErrInvalidArg) // nolint:revive // This lint is new and we do not want to break the API. + + // NotHexRegex is a regular expression to check if a string is + // a hexadecimal string. + NotHexRegex = regexp.Delayed(`[^0-9a-fA-F]`) ) diff --git a/vendor/github.com/containers/common/libnetwork/types/network.go b/vendor/github.com/containers/common/libnetwork/types/network.go index b8804bf6b..94087fd37 100644 --- a/vendor/github.com/containers/common/libnetwork/types/network.go +++ b/vendor/github.com/containers/common/libnetwork/types/network.go @@ -34,6 +34,10 @@ type ContainerNetwork interface { // DefaultNetworkName will return the default network name // for this interface. DefaultNetworkName() string + + // NetworkInfo return the network information about backend type, + // binary path, package version and so on. + NetworkInfo() NetworkInfo } // Network describes the Network attributes. @@ -50,6 +54,8 @@ type Network struct { Created time.Time `json:"created,omitempty"` // Subnets to use for this network. Subnets []Subnet `json:"subnets,omitempty"` + // Routes to use for this network. + Routes []Route `json:"routes,omitempty"` // IPv6Enabled if set to true an ipv6 subnet should be created for this net. IPv6Enabled bool `json:"ipv6_enabled"` // Internal is whether the Network should not have external routes @@ -80,6 +86,22 @@ type NetworkUpdateOptions struct { RemoveDNSServers []string `json:"remove_dns_servers,omitempty"` } +// NetworkInfo contains the network information. +type NetworkInfo struct { + Backend NetworkBackend `json:"backend"` + Version string `json:"version,omitempty"` + Package string `json:"package,omitempty"` + Path string `json:"path,omitempty"` + DNS DNSNetworkInfo `json:"dns,omitempty"` +} + +// NetworkInfo contains the DNS information. 
+type DNSNetworkInfo struct { + Version string `json:"version,omitempty"` + Package string `json:"package,omitempty"` + Path string `json:"path,omitempty"` +} + // IPNet is used as custom net.IPNet type to add Marshal/Unmarshal methods. type IPNet struct { net.IPNet @@ -169,6 +191,17 @@ type Subnet struct { LeaseRange *LeaseRange `json:"lease_range,omitempty"` } +type Route struct { + // Destination for this route in CIDR form. + // swagger:strfmt string + Destination IPNet `json:"destination"` + // Gateway IP for this route. + // swagger:strfmt string + Gateway net.IP `json:"gateway"` + // Metric for this route. Optional. + Metric *uint32 `json:"metric,omitempty"` +} + // LeaseRange contains the range where IP are leased. type LeaseRange struct { // StartIP first IP in the subnet which should be used to assign ips. diff --git a/vendor/github.com/containers/common/libnetwork/util/filters.go b/vendor/github.com/containers/common/libnetwork/util/filters.go index 2f1e4a21f..782c5d2b9 100644 --- a/vendor/github.com/containers/common/libnetwork/util/filters.go +++ b/vendor/github.com/containers/common/libnetwork/util/filters.go @@ -38,7 +38,7 @@ func createFilterFuncs(key string, filterValues []string) (types.FilterFunc, err case "id": // matches part of one id return func(net types.Network) bool { - return util.StringMatchRegexSlice(net.ID, filterValues) + return util.FilterID(net.ID, filterValues) }, nil // TODO: add dns enabled, internal filter diff --git a/vendor/github.com/containers/common/libnetwork/util/ip.go b/vendor/github.com/containers/common/libnetwork/util/ip.go index 7c315e312..1e426926e 100644 --- a/vendor/github.com/containers/common/libnetwork/util/ip.go +++ b/vendor/github.com/containers/common/libnetwork/util/ip.go @@ -54,3 +54,25 @@ func NormalizeIP(ip *net.IP) { *ip = ipv4 } } + +// GetLocalIP returns the first non loopback local IPv4 of the host. +// If no ipv4 address is found it may return an ipv6 address. +// When no ip is found and empty string is returned. 
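A sketch of a network definition using the new Routes field together with the no_default_route option; note that the Name and Options fields are pre-existing parts of types.Network and are not shown in this hunk:

package sketch

import (
	"net"

	"github.com/containers/common/libnetwork/types"
)

func routedNetwork() types.Network {
	_, dest, _ := net.ParseCIDR("192.168.55.0/24")
	metric := uint32(100)
	return types.Network{
		Name: "routed",
		// Ask the backend not to install a default route; the option key is
		// the new NoDefaultRoute constant added in const.go above.
		Options: map[string]string{types.NoDefaultRoute: "true"},
		Routes: []types.Route{{
			Destination: types.IPNet{IPNet: *dest},
			Gateway:     net.ParseIP("10.89.0.5"),
			Metric:      &metric, // optional
		}},
	}
}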
+func GetLocalIP() string { + addrs, err := net.InterfaceAddrs() + if err != nil { + return "" + } + ip := "" + for _, address := range addrs { + // check the address type and if it is not a loopback the display it + if ipnet, ok := address.(*net.IPNet); ok && ipnet.IP.IsGlobalUnicast() { + if IsIPv4(ipnet.IP) { + return ipnet.IP.String() + } + // if ipv6 we keep looking for an ipv4 address + ip = ipnet.IP.String() + } + } + return ip +} diff --git a/vendor/github.com/containers/common/pkg/cgroups/cpuset_linux.go b/vendor/github.com/containers/common/pkg/cgroups/cpuset_linux.go index a4cc2acaf..c55c76864 100644 --- a/vendor/github.com/containers/common/pkg/cgroups/cpuset_linux.go +++ b/vendor/github.com/containers/common/pkg/cgroups/cpuset_linux.go @@ -52,6 +52,6 @@ func (c *linuxCpusetHandler) Destroy(ctr *CgroupControl) error { } // Stat fills a metrics structure with usage stats for the controller -func (c *linuxCpusetHandler) Stat(ctr *CgroupControl, m *cgroups.Stats) error { +func (c *linuxCpusetHandler) Stat(_ *CgroupControl, _ *cgroups.Stats) error { return nil } diff --git a/vendor/github.com/containers/common/pkg/config/config.go b/vendor/github.com/containers/common/pkg/config/config.go index 140f10651..a408b4fd4 100644 --- a/vendor/github.com/containers/common/pkg/config/config.go +++ b/vendor/github.com/containers/common/pkg/config/config.go @@ -143,6 +143,12 @@ type ContainersConfig struct { // Labeling to separate containers (SELinux) EnableLabeling bool `toml:"label,omitempty"` + // EnableLabeledUsers indicates whether to enforce confined users with + // containers on SELinux systems. This option causes containers to + // maintain the current user and role field of the calling process. + // Otherwise containers run with user system_u, and the role system_r. + EnableLabeledUsers bool `toml:"label_users,omitempty"` + // Env is the environment variable list for container process. Env []string `toml:"env,omitempty"` @@ -504,6 +510,9 @@ type EngineConfig struct { // CompressionFormat is the compression format used to compress image layers. CompressionFormat string `toml:"compression_format,omitempty"` + + // CompressionLevel is the compression level used to compress image layers. + CompressionLevel *int `toml:"compression_level,omitempty"` } // SetOptions contains a subset of options in a Config. It's used to indicate if @@ -578,6 +587,10 @@ type NetworkConfig struct { // are always assigned randomly. DefaultSubnetPools []SubnetPool `toml:"default_subnet_pools,omitempty"` + // DefaultRootlessNetworkCmd is used to set the default rootless network + // program, either "slirp4nents" (default) or "pasta". + DefaultRootlessNetworkCmd string `toml:"default_rootless_network_cmd,omitempty"` + // NetworkConfigDir is where network configuration files are stored. NetworkConfigDir string `toml:"network_config_dir,omitempty"` @@ -585,6 +598,10 @@ type NetworkConfig struct { // for netavark rootful bridges with dns enabled. This can be necessary // when other dns forwarders run on the machine. 53 is used if unset. DNSBindPort uint16 `toml:"dns_bind_port,omitempty,omitzero"` + + // PastaOptions contains a default list of pasta(1) options that should + // be used when running pasta. 
+ PastaOptions []string `toml:"pasta_options,omitempty"` } type SubnetPool struct { diff --git a/vendor/github.com/containers/common/pkg/config/config_darwin.go b/vendor/github.com/containers/common/pkg/config/config_darwin.go index 13bd3a376..18c466210 100644 --- a/vendor/github.com/containers/common/pkg/config/config_darwin.go +++ b/vendor/github.com/containers/common/pkg/config/config_darwin.go @@ -30,9 +30,9 @@ func ifRootlessConfigPath() (string, error) { var defaultHelperBinariesDir = []string{ // Homebrew install paths - "/usr/local/opt/podman/libexec", + "/usr/local/opt/podman/libexec/podman", + "/opt/homebrew/opt/podman/libexec/podman", "/opt/homebrew/bin", - "/opt/homebrew/opt/podman/libexec", "/usr/local/bin", // default paths "/usr/local/libexec/podman", diff --git a/vendor/github.com/containers/common/pkg/config/containers.conf b/vendor/github.com/containers/common/pkg/config/containers.conf index 5d6e2efe3..ec881f2fc 100644 --- a/vendor/github.com/containers/common/pkg/config/containers.conf +++ b/vendor/github.com/containers/common/pkg/config/containers.conf @@ -173,6 +173,12 @@ default_sysctls = [ # #label = true +# label_users indicates whether to enforce confined users in containers on +# SELinux systems. This option causes containers to maintain the current user +# and role field of the calling process. By default SELinux containers run with +# the user system_u, and the role system_r. +#label_users = false + # Logging driver for the container. Available options: k8s-file and journald. # #log_driver = "k8s-file" @@ -308,9 +314,9 @@ default_sysctls = [ # #netavark_plugin_dirs = [ # "/usr/local/libexec/netavark", -# "/usr/libexec/netavark", -# "/usr/local/lib/netavark", -# "/usr/lib/netavark", +# "/usr/libexec/netavark", +# "/usr/local/lib/netavark", +# "/usr/lib/netavark", #] # The network name of the default network to attach pods to. @@ -338,6 +344,13 @@ default_sysctls = [ # {"base" = "10.128.0.0/9", "size" = 24}, #] + + +# Configure which rootless network program to use by default. Valid options are +# `slirp4netns` (default) and `pasta`. +# +#default_rootless_network_cmd = "slirp4netns" + # Path to the directory where network configuration files are located. # For the CNI backend the default is "/etc/cni/net.d" as root # and "$HOME/.config/cni/net.d" as rootless. @@ -353,16 +366,27 @@ default_sysctls = [ # #dns_bind_port = 53 +# A list of default pasta options that should be used running pasta. +# It accepts the pasta cli options, see pasta(1) for the full list of options. +# +#pasta_options = [] + [engine] # Index to the active service # -#active_service = production +#active_service = "production" # The compression format to use when pushing an image. # Valid options are: `gzip`, `zstd` and `zstd:chunked`. # #compression_format = "gzip" +# The compression level to use when pushing an image. +# Valid options depend on the compression format used. +# For gzip, valid options are 1-9, with a default of 5. +# For zstd, valid options are 1-20, with a default of 3. +# +#compression_level = 5 # Cgroup management implementation used for the runtime. # Valid options "systemd" or "cgroupfs" @@ -401,7 +425,7 @@ default_sysctls = [ # Format is a single character [a-Z] or a comma separated sequence of # `ctrl-`, where `` is one of: # `a-z`, `@`, `^`, `[`, `\`, `]`, `^` or `_` -# +# Specifying "" disables this feature. 
#detach_keys = "ctrl-p,ctrl-q" # Determines whether engine will reserve ports on the host when they are @@ -504,13 +528,13 @@ default_sysctls = [ # faster "shm" lock type. You may need to run "podman system renumber" after # you change the lock type. # -#lock_type** = "shm" +#lock_type = "shm" # MultiImageArchive - if true, the container engine allows for storing archives # (e.g., of the docker-archive transport) with multiple images. By default, # Podman creates single-image archives. # -#multi_image_archive = "false" +#multi_image_archive = false # Default engine namespace # If engine is joined to a namespace, it will see only containers and pods @@ -615,8 +639,8 @@ default_sysctls = [ # map of service destinations # -# [service_destinations] -# [service_destinations.production] +# [engine.service_destinations] +# [engine.service_destinations.production] # URI to access the Podman service # Examples: # rootless "unix://run/user/$UID/podman/podman.sock" (Default) diff --git a/vendor/github.com/containers/common/pkg/config/containers.conf-freebsd b/vendor/github.com/containers/common/pkg/config/containers.conf-freebsd index 7fe7538a1..5e187893b 100644 --- a/vendor/github.com/containers/common/pkg/config/containers.conf-freebsd +++ b/vendor/github.com/containers/common/pkg/config/containers.conf-freebsd @@ -311,6 +311,13 @@ default_sysctls = [ # #compression_format = "gzip" +# The compression level to use when pushing an image. +# Valid options depend on the compression format used. +# For gzip, valid options are 1-9, with a default of 5. +# For zstd, valid options are 1-20, with a default of 3. +# +#compression_level = 5 + # Environment variables to pass into conmon # #conmon_env_vars = [ diff --git a/vendor/github.com/containers/common/pkg/config/default.go b/vendor/github.com/containers/common/pkg/config/default.go index 28249e80e..127920997 100644 --- a/vendor/github.com/containers/common/pkg/config/default.go +++ b/vendor/github.com/containers/common/pkg/config/default.go @@ -32,8 +32,6 @@ const ( ) var ( - // DefaultInitPath is the default path to the container-init binary. - DefaultInitPath = "/usr/libexec/podman/catatonit" // DefaultInfraImage is the default image to run as infrastructure containers in pods. DefaultInfraImage = "" // DefaultRootlessSHMLockPath is the default path for rootless SHM locks. 
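A sketch of how the new configuration knobs surface through the Go API; config.Default() is assumed here as the entry point for loading the merged defaults:

package main

import (
	"fmt"

	"github.com/containers/common/pkg/config"
)

func main() {
	cfg, err := config.Default()
	if err != nil {
		panic(err)
	}
	fmt.Println(cfg.Network.DefaultRootlessNetworkCmd) // "slirp4netns" unless overridden
	fmt.Println(cfg.Network.PastaOptions)              // extra pasta(1) arguments, empty by default
	if cfg.Engine.CompressionLevel != nil {            // nil means "use the format's default level"
		fmt.Println(*cfg.Engine.CompressionLevel)
	}
	fmt.Println(cfg.Containers.EnableLabeledUsers) // label_users, false by default
}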
@@ -215,12 +213,13 @@ func DefaultConfig() (*Config, error) { UserNSSize: DefaultUserNSSize, // Deprecated }, Network: NetworkConfig{ - DefaultNetwork: "podman", - DefaultSubnet: DefaultSubnet, - DefaultSubnetPools: DefaultSubnetPools, - DNSBindPort: 0, - CNIPluginDirs: DefaultCNIPluginDirs, - NetavarkPluginDirs: DefaultNetavarkPluginDirs, + DefaultNetwork: "podman", + DefaultSubnet: DefaultSubnet, + DefaultSubnetPools: DefaultSubnetPools, + DefaultRootlessNetworkCmd: "slirp4netns", + DNSBindPort: 0, + CNIPluginDirs: DefaultCNIPluginDirs, + NetavarkPluginDirs: DefaultNetavarkPluginDirs, }, Engine: *defaultEngineConfig, Secrets: defaultSecretConfig(), @@ -283,6 +282,7 @@ func defaultConfigFromMemory() (*EngineConfig, error) { c.VolumePath = filepath.Join(storeOpts.GraphRoot, "volumes") c.VolumePluginTimeout = DefaultVolumePluginTimeout + c.CompressionFormat = "gzip" c.HelperBinariesDir = defaultHelperBinariesDir if additionalHelperBinariesDir != "" { diff --git a/vendor/github.com/containers/common/pkg/config/default_common.go b/vendor/github.com/containers/common/pkg/config/default_common.go new file mode 100644 index 000000000..f65461043 --- /dev/null +++ b/vendor/github.com/containers/common/pkg/config/default_common.go @@ -0,0 +1,7 @@ +//go:build !freebsd +// +build !freebsd + +package config + +// DefaultInitPath is the default path to the container-init binary. +var DefaultInitPath = "/usr/libexec/podman/catatonit" diff --git a/vendor/github.com/containers/common/pkg/config/default_freebsd.go b/vendor/github.com/containers/common/pkg/config/default_freebsd.go index f3c999bed..637abf981 100644 --- a/vendor/github.com/containers/common/pkg/config/default_freebsd.go +++ b/vendor/github.com/containers/common/pkg/config/default_freebsd.go @@ -1,5 +1,8 @@ package config +// DefaultInitPath is the default path to the container-init binary. +var DefaultInitPath = "/usr/local/libexec/podman/catatonit" + func getDefaultCgroupsMode() string { return "enabled" } diff --git a/vendor/github.com/containers/common/pkg/manifests/manifests.go b/vendor/github.com/containers/common/pkg/manifests/manifests.go index d351bdf17..8296faa82 100644 --- a/vendor/github.com/containers/common/pkg/manifests/manifests.go +++ b/vendor/github.com/containers/common/pkg/manifests/manifests.go @@ -73,7 +73,8 @@ func Create() List { // AddInstance adds an entry for the specified manifest digest, with assorted // additional information specified in parameters, to the list or index. -func (l *list) AddInstance(manifestDigest digest.Digest, manifestSize int64, manifestType, osName, architecture, osVersion string, osFeatures []string, variant string, features, annotations []string) error { +func (l *list) AddInstance(manifestDigest digest.Digest, manifestSize int64, manifestType, osName, architecture, osVersion string, osFeatures []string, variant string, features, annotations []string) error { // nolint:revive + // FIXME: the annotations argument is currently ignored if err := l.Remove(manifestDigest); err != nil && !errors.Is(err, os.ErrNotExist) { return err } diff --git a/vendor/github.com/containers/common/pkg/retry/retry.go b/vendor/github.com/containers/common/pkg/retry/retry.go index a838c706a..5cf311b43 100644 --- a/vendor/github.com/containers/common/pkg/retry/retry.go +++ b/vendor/github.com/containers/common/pkg/retry/retry.go @@ -17,8 +17,9 @@ import ( // Options defines the option to retry. type Options struct { - MaxRetry int // The number of times to possibly retry. 
- Delay time.Duration // The delay to use between retries, if set. + MaxRetry int // The number of times to possibly retry. + Delay time.Duration // The delay to use between retries, if set. + IsErrorRetryable func(error) bool } // RetryOptions is deprecated, use Options. @@ -31,6 +32,12 @@ func RetryIfNecessary(ctx context.Context, operation func() error, options *Opti // IfNecessary retries the operation in exponential backoff with the retry Options. func IfNecessary(ctx context.Context, operation func() error, options *Options) error { + var isRetryable func(error) bool + if options.IsErrorRetryable != nil { + isRetryable = options.IsErrorRetryable + } else { + isRetryable = IsErrorRetryable + } err := operation() for attempt := 0; err != nil && isRetryable(err) && attempt < options.MaxRetry; attempt++ { delay := time.Duration(int(math.Pow(2, float64(attempt)))) * time.Second @@ -49,7 +56,11 @@ func IfNecessary(ctx context.Context, operation func() error, options *Options) return err } -func isRetryable(err error) bool { +// IsErrorRetryable makes a HEURISTIC determination whether it is worth retrying upon encountering an error. +// That heuristic is NOT STABLE and it CAN CHANGE AT ANY TIME. +// Callers that have a hard requirement for specific treatment of a class of errors should make their own check +// instead of relying on this function maintaining its past behavior. +func IsErrorRetryable(err error) bool { switch err { case nil: return false @@ -72,18 +83,18 @@ func isRetryable(err error) bool { } return true case *net.OpError: - return isRetryable(e.Err) + return IsErrorRetryable(e.Err) case *url.Error: // This includes errors returned by the net/http client. if e.Err == io.EOF { // Happens when a server accepts a HTTP connection and sends EOF return true } - return isRetryable(e.Err) + return IsErrorRetryable(e.Err) case syscall.Errno: return isErrnoRetryable(e) case errcode.Errors: // if this error is a group of errors, process them all in turn for i := range e { - if !isRetryable(e[i]) { + if !IsErrorRetryable(e[i]) { return false } } @@ -91,7 +102,7 @@ func isRetryable(err error) bool { case *multierror.Error: // if this error is a group of errors, process them all in turn for i := range e.Errors { - if !isRetryable(e.Errors[i]) { + if !IsErrorRetryable(e.Errors[i]) { return false } } @@ -102,11 +113,11 @@ func isRetryable(err error) bool { } if unwrappable, ok := e.(unwrapper); ok { err = unwrappable.Unwrap() - return isRetryable(err) + return IsErrorRetryable(err) } case unwrapper: // Test this last, because various error types might implement .Unwrap() err = e.Unwrap() - return isRetryable(err) + return IsErrorRetryable(err) } return false diff --git a/vendor/github.com/containers/common/pkg/supplemented/supplemented.go b/vendor/github.com/containers/common/pkg/supplemented/supplemented.go index 58c9af654..6ae9a4160 100644 --- a/vendor/github.com/containers/common/pkg/supplemented/supplemented.go +++ b/vendor/github.com/containers/common/pkg/supplemented/supplemented.go @@ -280,7 +280,7 @@ func (s *supplementedImageReference) NewImageSource(ctx context.Context, sys *ty return iss, nil } -func (s *supplementedImageReference) DeleteImage(ctx context.Context, sys *types.SystemContext) error { +func (s *supplementedImageReference) DeleteImage(_ context.Context, _ *types.SystemContext) error { return fmt.Errorf("deletion of images not implemented") } diff --git a/vendor/github.com/containers/common/pkg/util/util.go b/vendor/github.com/containers/common/pkg/util/util.go index 
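The retry package above now exports its retryability heuristic (IsErrorRetryable) and lets callers override it per call via Options.IsErrorRetryable. A minimal, self-contained sketch of the new surface; the operation, delay, and retry count are placeholders:

package main

import (
	"context"
	"errors"
	"fmt"
	"time"

	"github.com/containers/common/pkg/retry"
)

func main() {
	// The operation fails twice and then succeeds; the custom hook treats
	// every error as retryable. When IsErrorRetryable is left nil, the
	// package falls back to the exported retry.IsErrorRetryable heuristic.
	attempts := 0
	err := retry.IfNecessary(context.Background(), func() error {
		attempts++
		if attempts < 3 {
			return errors.New("transient failure")
		}
		return nil
	}, &retry.Options{
		MaxRetry: 5,
		Delay:    10 * time.Millisecond,
		IsErrorRetryable: func(err error) bool {
			return err != nil
		},
	})
	fmt.Println("attempts:", attempts, "err:", err)
}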
98890a686..e396f0fc0 100644 --- a/vendor/github.com/containers/common/pkg/util/util.go +++ b/vendor/github.com/containers/common/pkg/util/util.go @@ -1,6 +1,101 @@ package util -import "regexp" +import ( + "bytes" + "fmt" + "os" + "os/exec" + "path/filepath" + "regexp" + "strings" + "time" + + "github.com/containers/common/libnetwork/types" + "github.com/fsnotify/fsnotify" + "github.com/sirupsen/logrus" +) + +const ( + UnknownPackage = "Unknown" +) + +// Note: This function is copied from containers/podman libpod/util.go +// Please see https://github.com/containers/common/pull/1460 +func queryPackageVersion(cmdArg ...string) string { + output := UnknownPackage + if 1 < len(cmdArg) { + cmd := exec.Command(cmdArg[0], cmdArg[1:]...) + if outp, err := cmd.Output(); err == nil { + output = string(outp) + if cmdArg[0] == "/usr/bin/dpkg" { + r := strings.Split(output, ": ") + queryFormat := `${Package}_${Version}_${Architecture}` + cmd = exec.Command("/usr/bin/dpkg-query", "-f", queryFormat, "-W", r[0]) + if outp, err := cmd.Output(); err == nil { + output = string(outp) + } + } + } + if cmdArg[0] == "/sbin/apk" { + prefix := cmdArg[len(cmdArg)-1] + " is owned by " + output = strings.Replace(output, prefix, "", 1) + } + } + return strings.Trim(output, "\n") +} + +// Note: This function is copied from containers/podman libpod/util.go +// Please see https://github.com/containers/common/pull/1460 +func PackageVersion(program string) string { // program is full path + packagers := [][]string{ + {"/usr/bin/rpm", "-q", "-f"}, + {"/usr/bin/dpkg", "-S"}, // Debian, Ubuntu + {"/usr/bin/pacman", "-Qo"}, // Arch + {"/usr/bin/qfile", "-qv"}, // Gentoo (quick) + {"/usr/bin/equery", "b"}, // Gentoo (slow) + {"/sbin/apk", "info", "-W"}, // Alpine + {"/usr/local/sbin/pkg", "which", "-q"}, // FreeBSD + } + + for _, cmd := range packagers { + cmd = append(cmd, program) + if out := queryPackageVersion(cmd...); out != UnknownPackage { + return out + } + } + return UnknownPackage +} + +// Note: This function is copied from containers/podman libpod/util.go +// Please see https://github.com/containers/common/pull/1460 +func ProgramVersion(program string) (string, error) { + return programVersion(program, false) +} + +func ProgramVersionDnsname(program string) (string, error) { + return programVersion(program, true) +} + +func programVersion(program string, dnsname bool) (string, error) { + cmd := exec.Command(program, "--version") + var stdout bytes.Buffer + var stderr bytes.Buffer + cmd.Stdout = &stdout + cmd.Stderr = &stderr + + err := cmd.Run() + if err != nil { + return "", fmt.Errorf("`%v --version` failed: %v %v (%v)", program, stderr.String(), stdout.String(), err) + } + + output := strings.TrimSuffix(stdout.String(), "\n") + // dnsname --version returns the information to stderr + if dnsname { + output = strings.TrimSuffix(stderr.String(), "\n") + } + + return output, nil +} // StringInSlice determines if a string is in a string slice, returns bool func StringInSlice(s string, sl []string) bool { @@ -22,3 +117,73 @@ func StringMatchRegexSlice(s string, re []string) bool { } return false } + +// FilterID is a function used to compare an id against a set of ids, if the +// input is hex we check if the prefix matches. Otherwise we assume it is a +// regex and try to match that. 
+// see https://github.com/containers/podman/issues/18471 for why we do this +func FilterID(id string, filters []string) bool { + for _, want := range filters { + isRegex := types.NotHexRegex.MatchString(want) + if isRegex { + match, err := regexp.MatchString(want, id) + if err == nil && match { + return true + } + } else if strings.HasPrefix(id, strings.ToLower(want)) { + return true + } + } + return false +} + +// WaitForFile waits until a file has been created or the given timeout has occurred +func WaitForFile(path string, chWait chan error, timeout time.Duration) (bool, error) { + var inotifyEvents chan fsnotify.Event + watcher, err := fsnotify.NewWatcher() + if err == nil { + if err := watcher.Add(filepath.Dir(path)); err == nil { + inotifyEvents = watcher.Events + } + defer func() { + if err := watcher.Close(); err != nil { + logrus.Errorf("Failed to close fsnotify watcher: %v", err) + } + }() + } + + var timeoutChan <-chan time.Time + + if timeout != 0 { + timeoutChan = time.After(timeout) + } + + for { + select { + case e := <-chWait: + return true, e + case <-inotifyEvents: + _, err := os.Stat(path) + if err == nil { + return false, nil + } + if !os.IsNotExist(err) { + return false, err + } + case <-time.After(25 * time.Millisecond): + // Check periodically for the file existence. It is needed + // if the inotify watcher could not have been created. It is + // also useful when using inotify as if for any reasons we missed + // a notification, we won't hang the process. + _, err := os.Stat(path) + if err == nil { + return false, nil + } + if !os.IsNotExist(err) { + return false, err + } + case <-timeoutChan: + return false, fmt.Errorf("timed out waiting for file %s", path) + } + } +} diff --git a/vendor/github.com/containers/common/version/version.go b/vendor/github.com/containers/common/version/version.go index 8bd54871c..71d6aafa9 100644 --- a/vendor/github.com/containers/common/version/version.go +++ b/vendor/github.com/containers/common/version/version.go @@ -1,4 +1,4 @@ package version // Version is the version of the build. -const Version = "0.53.0" +const Version = "0.55.2" diff --git a/vendor/github.com/containers/image/v5/copy/blob.go b/vendor/github.com/containers/image/v5/copy/blob.go index 96674ddbb..f45b97f56 100644 --- a/vendor/github.com/containers/image/v5/copy/blob.go +++ b/vendor/github.com/containers/image/v5/copy/blob.go @@ -43,7 +43,7 @@ func (ic *imageCopier) copyBlobFromStream(ctx context.Context, srcReader io.Read stream.reader = bar.ProxyReader(stream.reader) // === Decrypt the stream, if required. - decryptionStep, err := ic.c.blobPipelineDecryptionStep(&stream, srcInfo) + decryptionStep, err := ic.blobPipelineDecryptionStep(&stream, srcInfo) if err != nil { return types.BlobInfo{}, err } @@ -78,7 +78,7 @@ func (ic *imageCopier) copyBlobFromStream(ctx context.Context, srcReader io.Read // Before relaxing this, see the original pull request’s review if there are other reasons to reject this. 
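The pkg/util additions above (FilterID, WaitForFile) are easiest to read with a usage sketch; the ID, file path, and timeout below are placeholders:

package main

import (
	"fmt"
	"time"

	"github.com/containers/common/pkg/util"
)

func main() {
	// FilterID: a hex filter matches by prefix, anything non-hex is
	// treated as a regular expression.
	id := "9f86d081884c7d659a2feaa0c55ad015a3bf4f1b2b0b822cd15d6c15b0f00a08"
	fmt.Println(util.FilterID(id, []string{"9f86d0"}))    // true: hex prefix match
	fmt.Println(util.FilterID(id, []string{"^9f.*a08$"})) // true: regex match

	// WaitForFile: returns when the file appears, a value arrives on the
	// channel, or the timeout expires (here it simply times out).
	ch := make(chan error)
	done, err := util.WaitForFile("/tmp/ready", ch, 50*time.Millisecond)
	fmt.Println(done, err)
}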
return types.BlobInfo{}, errors.New("Unable to support both decryption and encryption in the same copy") } - encryptionStep, err := ic.c.blobPipelineEncryptionStep(&stream, toEncrypt, srcInfo, decryptionStep) + encryptionStep, err := ic.blobPipelineEncryptionStep(&stream, toEncrypt, srcInfo, decryptionStep) if err != nil { return types.BlobInfo{}, err } diff --git a/vendor/github.com/containers/image/v5/copy/encryption.go b/vendor/github.com/containers/image/v5/copy/encryption.go index 54aca9e57..86fadff66 100644 --- a/vendor/github.com/containers/image/v5/copy/encryption.go +++ b/vendor/github.com/containers/image/v5/copy/encryption.go @@ -33,28 +33,33 @@ type bpDecryptionStepData struct { // blobPipelineDecryptionStep updates *stream to decrypt if, it necessary. // srcInfo is only used for error messages. // Returns data for other steps; the caller should eventually use updateCryptoOperation. -func (c *copier) blobPipelineDecryptionStep(stream *sourceStream, srcInfo types.BlobInfo) (*bpDecryptionStepData, error) { - if isOciEncrypted(stream.info.MediaType) && c.ociDecryptConfig != nil { - desc := imgspecv1.Descriptor{ - Annotations: stream.info.Annotations, - } - reader, decryptedDigest, err := ocicrypt.DecryptLayer(c.ociDecryptConfig, stream.reader, desc, false) - if err != nil { - return nil, fmt.Errorf("decrypting layer %s: %w", srcInfo.Digest, err) - } - - stream.reader = reader - stream.info.Digest = decryptedDigest - stream.info.Size = -1 - maps.DeleteFunc(stream.info.Annotations, func(k string, _ string) bool { - return strings.HasPrefix(k, "org.opencontainers.image.enc") - }) +func (ic *imageCopier) blobPipelineDecryptionStep(stream *sourceStream, srcInfo types.BlobInfo) (*bpDecryptionStepData, error) { + if !isOciEncrypted(stream.info.MediaType) || ic.c.ociDecryptConfig == nil { return &bpDecryptionStepData{ - decrypting: true, + decrypting: false, }, nil } + + if ic.cannotModifyManifestReason != "" { + return nil, fmt.Errorf("layer %s should be decrypted, but we can’t modify the manifest: %s", srcInfo.Digest, ic.cannotModifyManifestReason) + } + + desc := imgspecv1.Descriptor{ + Annotations: stream.info.Annotations, + } + reader, decryptedDigest, err := ocicrypt.DecryptLayer(ic.c.ociDecryptConfig, stream.reader, desc, false) + if err != nil { + return nil, fmt.Errorf("decrypting layer %s: %w", srcInfo.Digest, err) + } + + stream.reader = reader + stream.info.Digest = decryptedDigest + stream.info.Size = -1 + maps.DeleteFunc(stream.info.Annotations, func(k string, _ string) bool { + return strings.HasPrefix(k, "org.opencontainers.image.enc") + }) return &bpDecryptionStepData{ - decrypting: false, + decrypting: true, }, nil } @@ -74,34 +79,39 @@ type bpEncryptionStepData struct { // blobPipelineEncryptionStep updates *stream to encrypt if, it required by toEncrypt. // srcInfo is primarily used for error messages. // Returns data for other steps; the caller should eventually call updateCryptoOperationAndAnnotations. 
-func (c *copier) blobPipelineEncryptionStep(stream *sourceStream, toEncrypt bool, srcInfo types.BlobInfo, +func (ic *imageCopier) blobPipelineEncryptionStep(stream *sourceStream, toEncrypt bool, srcInfo types.BlobInfo, decryptionStep *bpDecryptionStepData) (*bpEncryptionStepData, error) { - if toEncrypt && !isOciEncrypted(srcInfo.MediaType) && c.ociEncryptConfig != nil { - var annotations map[string]string - if !decryptionStep.decrypting { - annotations = srcInfo.Annotations - } - desc := imgspecv1.Descriptor{ - MediaType: srcInfo.MediaType, - Digest: srcInfo.Digest, - Size: srcInfo.Size, - Annotations: annotations, - } - reader, finalizer, err := ocicrypt.EncryptLayer(c.ociEncryptConfig, stream.reader, desc) - if err != nil { - return nil, fmt.Errorf("encrypting blob %s: %w", srcInfo.Digest, err) - } - - stream.reader = reader - stream.info.Digest = "" - stream.info.Size = -1 + if !toEncrypt || isOciEncrypted(srcInfo.MediaType) || ic.c.ociEncryptConfig == nil { return &bpEncryptionStepData{ - encrypting: true, - finalizer: finalizer, + encrypting: false, }, nil } + + if ic.cannotModifyManifestReason != "" { + return nil, fmt.Errorf("layer %s should be encrypted, but we can’t modify the manifest: %s", srcInfo.Digest, ic.cannotModifyManifestReason) + } + + var annotations map[string]string + if !decryptionStep.decrypting { + annotations = srcInfo.Annotations + } + desc := imgspecv1.Descriptor{ + MediaType: srcInfo.MediaType, + Digest: srcInfo.Digest, + Size: srcInfo.Size, + Annotations: annotations, + } + reader, finalizer, err := ocicrypt.EncryptLayer(ic.c.ociEncryptConfig, stream.reader, desc) + if err != nil { + return nil, fmt.Errorf("encrypting blob %s: %w", srcInfo.Digest, err) + } + + stream.reader = reader + stream.info.Digest = "" + stream.info.Size = -1 return &bpEncryptionStepData{ - encrypting: false, + encrypting: true, + finalizer: finalizer, }, nil } diff --git a/vendor/github.com/containers/image/v5/copy/manifest.go b/vendor/github.com/containers/image/v5/copy/manifest.go index a35ea4220..6f01cf5cc 100644 --- a/vendor/github.com/containers/image/v5/copy/manifest.go +++ b/vendor/github.com/containers/image/v5/copy/manifest.go @@ -9,6 +9,7 @@ import ( "github.com/containers/image/v5/internal/set" "github.com/containers/image/v5/manifest" "github.com/containers/image/v5/types" + v1 "github.com/opencontainers/image-spec/specs-go/v1" "github.com/sirupsen/logrus" "golang.org/x/exp/slices" ) @@ -18,6 +19,9 @@ import ( // Include v2s1 signed but not v2s1 unsigned, because docker/distribution requires a signature even if the unsigned MIME type is used. var preferredManifestMIMETypes = []string{manifest.DockerV2Schema2MediaType, manifest.DockerV2Schema1SignedMediaType} +// ociEncryptionMIMETypes lists manifest MIME types that are known to support OCI encryption. +var ociEncryptionMIMETypes = []string{v1.MediaTypeImageManifest} + // orderedSet is a list of strings (MIME types or platform descriptors in our case), with each string appearing at most once. type orderedSet struct { list []string @@ -76,11 +80,14 @@ func determineManifestConversion(in determineManifestConversionInputs) (manifest destSupportedManifestMIMETypes = []string{in.forceManifestMIMEType} } - if len(destSupportedManifestMIMETypes) == 0 && (!in.requiresOCIEncryption || manifest.MIMETypeSupportsEncryption(srcType)) { - return manifestConversionPlan{ // Anything goes; just use the original as is, do not try any conversions. 
- preferredMIMEType: srcType, - otherMIMETypeCandidates: []string{}, - }, nil + if len(destSupportedManifestMIMETypes) == 0 { + if !in.requiresOCIEncryption || manifest.MIMETypeSupportsEncryption(srcType) { + return manifestConversionPlan{ // Anything goes; just use the original as is, do not try any conversions. + preferredMIMEType: srcType, + otherMIMETypeCandidates: []string{}, + }, nil + } + destSupportedManifestMIMETypes = ociEncryptionMIMETypes } supportedByDest := set.New[string]() for _, t := range destSupportedManifestMIMETypes { @@ -88,6 +95,27 @@ func determineManifestConversion(in determineManifestConversionInputs) (manifest supportedByDest.Add(t) } } + if supportedByDest.Empty() { + if len(destSupportedManifestMIMETypes) == 0 { // Coverage: This should never happen, empty values were replaced by ociEncryptionMIMETypes + return manifestConversionPlan{}, errors.New("internal error: destSupportedManifestMIMETypes is empty") + } + // We know, and have verified, that destSupportedManifestMIMETypes is not empty, so encryption must have been involved. + if !in.requiresOCIEncryption { // Coverage: This should never happen, destSupportedManifestMIMETypes was not empty, so we should have filtered for encryption. + return manifestConversionPlan{}, errors.New("internal error: supportedByDest is empty but destSupportedManifestMIMETypes is not, and not encrypting") + } + // destSupportedManifestMIMETypes has three possible origins: + if in.forceManifestMIMEType != "" { // 1. forceManifestType specified + return manifestConversionPlan{}, fmt.Errorf("encryption required together with format %s, which does not support encryption", + in.forceManifestMIMEType) + } + if len(in.destSupportedManifestMIMETypes) == 0 { // 2. destination accepts anything and we have chosen ociEncryptionMIMETypes + // Coverage: This should never happen, ociEncryptionMIMETypes all support encryption + return manifestConversionPlan{}, errors.New("internal error: in.destSupportedManifestMIMETypes is empty but supportedByDest is empty as well") + } + // 3. destination does not support encryption. + return manifestConversionPlan{}, fmt.Errorf("encryption required but the destination only supports MIME types [%s], none of which support encryption", + strings.Join(destSupportedManifestMIMETypes, ", ")) + } // destSupportedManifestMIMETypes is a static guess; a particular registry may still only support a subset of the types. // So, build a list of types to try in order of decreasing preference. @@ -122,11 +150,13 @@ func determineManifestConversion(in determineManifestConversionInputs) (manifest // Finally, try anything else the destination supports. for _, t := range destSupportedManifestMIMETypes { - prioritizedTypes.append(t) + if supportedByDest.Contains(t) { + prioritizedTypes.append(t) + } } logrus.Debugf("Manifest has MIME type %s, ordered candidate list [%s]", srcType, strings.Join(prioritizedTypes.list, ", ")) - if len(prioritizedTypes.list) == 0 { // Coverage: destSupportedManifestMIMETypes is not empty (or we would have exited in the “Anything goes” case above), so this should never happen. + if len(prioritizedTypes.list) == 0 { // Coverage: destSupportedManifestMIMETypes and supportedByDest, which is a subset, is not empty (or we would have exited above), so this should never happen. 
return manifestConversionPlan{}, errors.New("Internal error: no candidate MIME types") } res := manifestConversionPlan{ diff --git a/vendor/github.com/containers/image/v5/copy/multiple.go b/vendor/github.com/containers/image/v5/copy/multiple.go index 097a18855..41ea1b11b 100644 --- a/vendor/github.com/containers/image/v5/copy/multiple.go +++ b/vendor/github.com/containers/image/v5/copy/multiple.go @@ -12,11 +12,41 @@ import ( internalManifest "github.com/containers/image/v5/internal/manifest" "github.com/containers/image/v5/manifest" "github.com/containers/image/v5/signature" + digest "github.com/opencontainers/go-digest" imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1" "github.com/sirupsen/logrus" "golang.org/x/exp/slices" ) +type instanceCopyKind int + +const ( + instanceCopyCopy instanceCopyKind = iota + instanceCopyClone +) + +type instanceCopy struct { + op instanceCopyKind + sourceDigest digest.Digest +} + +// prepareInstanceCopies prepares a list of instances which needs to copied to the manifest list. +func prepareInstanceCopies(instanceDigests []digest.Digest, options *Options) []instanceCopy { + res := []instanceCopy{} + for i, instanceDigest := range instanceDigests { + if options.ImageListSelection == CopySpecificImages && + !slices.Contains(options.Instances, instanceDigest) { + logrus.Debugf("Skipping instance %s (%d/%d)", instanceDigest, i+1, len(instanceDigests)) + continue + } + res = append(res, instanceCopy{ + op: instanceCopyCopy, + sourceDigest: instanceDigest, + }) + } + return res +} + // copyMultipleImages copies some or all of an image list's instances, using // policyContext to validate source image admissibility. func (c *copier) copyMultipleImages(ctx context.Context, policyContext *signature.PolicyContext, options *Options, unparsedToplevel *image.UnparsedImage) (copiedManifest []byte, retErr error) { @@ -88,44 +118,35 @@ func (c *copier) copyMultipleImages(ctx context.Context, policyContext *signatur // Copy each image, or just the ones we want to copy, in turn. instanceDigests := updatedList.Instances() - imagesToCopy := len(instanceDigests) - if options.ImageListSelection == CopySpecificImages { - imagesToCopy = len(options.Instances) - } - c.Printf("Copying %d of %d images in list\n", imagesToCopy, len(instanceDigests)) - updates := make([]manifest.ListUpdate, len(instanceDigests)) - instancesCopied := 0 - for i, instanceDigest := range instanceDigests { - if options.ImageListSelection == CopySpecificImages && - !slices.Contains(options.Instances, instanceDigest) { - update, err := updatedList.Instance(instanceDigest) + instanceEdits := []internalManifest.ListEdit{} + instanceCopyList := prepareInstanceCopies(instanceDigests, options) + c.Printf("Copying %d of %d images in list\n", len(instanceCopyList), len(instanceDigests)) + for i, instance := range instanceCopyList { + // Update instances to be edited by their `ListOperation` and + // populate necessary fields. 
+ switch instance.op { + case instanceCopyCopy: + logrus.Debugf("Copying instance %s (%d/%d)", instance.sourceDigest, i+1, len(instanceCopyList)) + c.Printf("Copying image %s (%d/%d)\n", instance.sourceDigest, i+1, len(instanceCopyList)) + unparsedInstance := image.UnparsedInstance(c.rawSource, &instanceCopyList[i].sourceDigest) + updatedManifest, updatedManifestType, updatedManifestDigest, err := c.copySingleImage(ctx, policyContext, options, unparsedToplevel, unparsedInstance, &instanceCopyList[i].sourceDigest) if err != nil { - return nil, err + return nil, fmt.Errorf("copying image %d/%d from manifest list: %w", i+1, len(instanceCopyList), err) } - logrus.Debugf("Skipping instance %s (%d/%d)", instanceDigest, i+1, len(instanceDigests)) - // Record the digest/size/type of the manifest that we didn't copy. - updates[i] = update - continue - } - logrus.Debugf("Copying instance %s (%d/%d)", instanceDigest, i+1, len(instanceDigests)) - c.Printf("Copying image %s (%d/%d)\n", instanceDigest, instancesCopied+1, imagesToCopy) - unparsedInstance := image.UnparsedInstance(c.rawSource, &instanceDigest) - updatedManifest, updatedManifestType, updatedManifestDigest, err := c.copySingleImage(ctx, policyContext, options, unparsedToplevel, unparsedInstance, &instanceDigest) - if err != nil { - return nil, fmt.Errorf("copying image %d/%d from manifest list: %w", instancesCopied+1, imagesToCopy, err) - } - instancesCopied++ - // Record the result of a possible conversion here. - update := manifest.ListUpdate{ - Digest: updatedManifestDigest, - Size: int64(len(updatedManifest)), - MediaType: updatedManifestType, + // Record the result of a possible conversion here. + instanceEdits = append(instanceEdits, internalManifest.ListEdit{ + ListOperation: internalManifest.ListOpUpdate, + UpdateOldDigest: instance.sourceDigest, + UpdateDigest: updatedManifestDigest, + UpdateSize: int64(len(updatedManifest)), + UpdateMediaType: updatedManifestType}) + default: + return nil, fmt.Errorf("copying image: invalid copy operation %d", instance.op) } - updates[i] = update } // Now reset the digest/size/types of the manifests in the list to account for any conversions that we made. - if err = updatedList.UpdateInstances(updates); err != nil { + if err = updatedList.EditInstances(instanceEdits); err != nil { return nil, fmt.Errorf("updating manifest list: %w", err) } diff --git a/vendor/github.com/containers/image/v5/copy/single.go b/vendor/github.com/containers/image/v5/copy/single.go index 9afdea73d..b8569a70c 100644 --- a/vendor/github.com/containers/image/v5/copy/single.go +++ b/vendor/github.com/containers/image/v5/copy/single.go @@ -256,9 +256,11 @@ func (c *copier) copySingleImage(ctx context.Context, policyContext *signature.P } sigs = append(sigs, newSigs...) 
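For context on the reworked instance-copy loop above, this is a hedged sketch of the public entry point that exercises it, copying only selected instances of a manifest list; the image references and the instance digest are placeholders:

package main

import (
	"context"
	"os"

	"github.com/containers/image/v5/copy"
	"github.com/containers/image/v5/signature"
	"github.com/containers/image/v5/transports/alltransports"
	digest "github.com/opencontainers/go-digest"
)

func main() {
	srcRef, err := alltransports.ParseImageName("docker://quay.io/example/app:latest")
	if err != nil {
		panic(err)
	}
	destRef, err := alltransports.ParseImageName("dir:/tmp/app-copy")
	if err != nil {
		panic(err)
	}
	// Accept-anything policy, for illustration only.
	policy := &signature.Policy{Default: []signature.PolicyRequirement{signature.NewPRInsecureAcceptAnything()}}
	policyCtx, err := signature.NewPolicyContext(policy)
	if err != nil {
		panic(err)
	}
	defer policyCtx.Destroy()

	// CopySpecificImages plus Instances drives prepareInstanceCopies above.
	_, err = copy.Image(context.Background(), policyCtx, destRef, srcRef, &copy.Options{
		ReportWriter:       os.Stdout,
		ImageListSelection: copy.CopySpecificImages,
		Instances: []digest.Digest{
			"sha256:0000000000000000000000000000000000000000000000000000000000000000", // placeholder
		},
	})
	if err != nil {
		panic(err)
	}
}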
- c.Printf("Storing signatures\n") - if err := c.dest.PutSignaturesWithFormat(ctx, sigs, targetInstance); err != nil { - return nil, "", "", fmt.Errorf("writing signatures: %w", err) + if len(sigs) > 0 { + c.Printf("Storing signatures\n") + if err := c.dest.PutSignaturesWithFormat(ctx, sigs, targetInstance); err != nil { + return nil, "", "", fmt.Errorf("writing signatures: %w", err) + } } return manifestBytes, retManifestType, retManifestDigest, nil diff --git a/vendor/github.com/containers/image/v5/docker/daemon/client.go b/vendor/github.com/containers/image/v5/docker/daemon/client.go index 7d2a98d68..2c245f54f 100644 --- a/vendor/github.com/containers/image/v5/docker/daemon/client.go +++ b/vendor/github.com/containers/image/v5/docker/daemon/client.go @@ -21,33 +21,49 @@ func newDockerClient(sys *types.SystemContext) (*dockerclient.Client, error) { host = sys.DockerDaemonHost } - // Sadly, unix:// sockets don't work transparently with dockerclient.NewClient. - // They work fine with a nil httpClient; with a non-nil httpClient, the transport’s - // TLSClientConfig must be nil (or the client will try using HTTPS over the PF_UNIX socket - // regardless of the values in the *tls.Config), and we would have to call sockets.ConfigureTransport. + opts := []dockerclient.Opt{ + dockerclient.WithHost(host), + dockerclient.WithVersion(defaultAPIVersion), + } + + // We conditionalize building the TLS configuration only to TLS sockets: + // + // The dockerclient.Client implementation differentiates between + // - Client.proto, which is ~how the connection is establishe (IP / AF_UNIX/Windows) + // - Client.scheme, which is what is sent over the connection (HTTP with/without TLS). + // + // Only Client.proto is set from the URL in dockerclient.WithHost(), + // Client.scheme is detected based on a http.Client.TLSClientConfig presence; + // dockerclient.WithHTTPClient with a client that has TLSClientConfig set + // will, by default, trigger an attempt to use TLS. + // + // So, don’t use WithHTTPClient for unix:// sockets at all. // - // We don't really want to configure anything for unix:// sockets, so just pass a nil *http.Client. + // Similarly, if we want to communicate over plain HTTP on a TCP socket (http://), + // we also should not set TLSClientConfig. We continue to use WithHTTPClient + // with our slightly non-default settings to avoid a behavior change on updates of c/image. // - // Similarly, if we want to communicate over plain HTTP on a TCP socket, we also need to set - // TLSClientConfig to nil. This can be achieved by using the form `http://` + // Alternatively we could use dockerclient.WithScheme to drive the TLS/non-TLS logic + // explicitly, but we would still want to set WithHTTPClient (differently) for https:// and http:// ; + // so that would not be any simpler. serverURL, err := dockerclient.ParseHostURL(host) if err != nil { return nil, err } - var httpClient *http.Client - if serverURL.Scheme != "unix" { - if serverURL.Scheme == "http" { - httpClient = httpConfig() - } else { - hc, err := tlsConfig(sys) - if err != nil { - return nil, err - } - httpClient = hc + switch serverURL.Scheme { + case "unix": // Nothing + case "http": + hc := httpConfig() + opts = append(opts, dockerclient.WithHTTPClient(hc)) + default: + hc, err := tlsConfig(sys) + if err != nil { + return nil, err } + opts = append(opts, dockerclient.WithHTTPClient(hc)) } - return dockerclient.NewClient(host, defaultAPIVersion, httpClient, nil) + return dockerclient.NewClientWithOpts(opts...) 
} func tlsConfig(sys *types.SystemContext) (*http.Client, error) { diff --git a/vendor/github.com/containers/image/v5/docker/docker_client.go b/vendor/github.com/containers/image/v5/docker/docker_client.go index 570cca483..dd9127c5a 100644 --- a/vendor/github.com/containers/image/v5/docker/docker_client.go +++ b/vendor/github.com/containers/image/v5/docker/docker_client.go @@ -161,17 +161,6 @@ func newBearerTokenFromJSONBlob(blob []byte) (*bearerToken, error) { return token, nil } -// this is cloned from docker/go-connections because upstream docker has changed -// it and make deps here fails otherwise. -// We'll drop this once we upgrade to docker 1.13.x deps. -func serverDefault() *tls.Config { - return &tls.Config{ - // Avoid fallback to SSL protocols < TLS1.0 - MinVersion: tls.VersionTLS10, - CipherSuites: tlsconfig.DefaultServerAcceptedCiphers, - } -} - // dockerCertDir returns a path to a directory to be consumed by tlsclientconfig.SetupCertificates() depending on ctx and hostPort. func dockerCertDir(sys *types.SystemContext, hostPort string) (string, error) { if sys != nil && sys.DockerCertPath != "" { @@ -254,7 +243,9 @@ func newDockerClient(sys *types.SystemContext, registry, reference string) (*doc if registry == dockerHostname { registry = dockerRegistry } - tlsClientConfig := serverDefault() + tlsClientConfig := &tls.Config{ + CipherSuites: tlsconfig.DefaultServerAcceptedCiphers, + } // It is undefined whether the host[:port] string for dockerHostname should be dockerHostname or dockerRegistry, // because docker/docker does not read the certs.d subdirectory at all in that case. We use the user-visible diff --git a/vendor/github.com/containers/image/v5/internal/image/docker_schema2.go b/vendor/github.com/containers/image/v5/internal/image/docker_schema2.go index 15c9c2279..c3234c377 100644 --- a/vendor/github.com/containers/image/v5/internal/image/docker_schema2.go +++ b/vendor/github.com/containers/image/v5/internal/image/docker_schema2.go @@ -226,9 +226,9 @@ func (m *manifestSchema2) convertToManifestOCI1(ctx context.Context, _ *types.Ma layers[idx] = oci1DescriptorFromSchema2Descriptor(m.m.LayersDescriptors[idx]) switch m.m.LayersDescriptors[idx].MediaType { case manifest.DockerV2Schema2ForeignLayerMediaType: - layers[idx].MediaType = imgspecv1.MediaTypeImageLayerNonDistributable + layers[idx].MediaType = imgspecv1.MediaTypeImageLayerNonDistributable //nolint:staticcheck // NonDistributable layers are deprecated, but we want to continue to support manipulating pre-existing images. case manifest.DockerV2Schema2ForeignLayerMediaTypeGzip: - layers[idx].MediaType = imgspecv1.MediaTypeImageLayerNonDistributableGzip + layers[idx].MediaType = imgspecv1.MediaTypeImageLayerNonDistributableGzip //nolint:staticcheck // NonDistributable layers are deprecated, but we want to continue to support manipulating pre-existing images. 
case manifest.DockerV2SchemaLayerMediaTypeUncompressed: layers[idx].MediaType = imgspecv1.MediaTypeImageLayer case manifest.DockerV2Schema2LayerMediaType: diff --git a/vendor/github.com/containers/image/v5/internal/image/oci.go b/vendor/github.com/containers/image/v5/internal/image/oci.go index 4b74de3e5..166daa0e8 100644 --- a/vendor/github.com/containers/image/v5/internal/image/oci.go +++ b/vendor/github.com/containers/image/v5/internal/image/oci.go @@ -215,11 +215,11 @@ func (m *manifestOCI1) convertToManifestSchema2(_ context.Context, _ *types.Mani for idx := range layers { layers[idx] = schema2DescriptorFromOCI1Descriptor(m.m.Layers[idx]) switch layers[idx].MediaType { - case imgspecv1.MediaTypeImageLayerNonDistributable: + case imgspecv1.MediaTypeImageLayerNonDistributable: //nolint:staticcheck // NonDistributable layers are deprecated, but we want to continue to support manipulating pre-existing images. layers[idx].MediaType = manifest.DockerV2Schema2ForeignLayerMediaType - case imgspecv1.MediaTypeImageLayerNonDistributableGzip: + case imgspecv1.MediaTypeImageLayerNonDistributableGzip: //nolint:staticcheck // NonDistributable layers are deprecated, but we want to continue to support manipulating pre-existing images. layers[idx].MediaType = manifest.DockerV2Schema2ForeignLayerMediaTypeGzip - case imgspecv1.MediaTypeImageLayerNonDistributableZstd: + case imgspecv1.MediaTypeImageLayerNonDistributableZstd: //nolint:staticcheck // NonDistributable layers are deprecated, but we want to continue to support manipulating pre-existing images. return nil, fmt.Errorf("Error during manifest conversion: %q: zstd compression is not supported for docker images", layers[idx].MediaType) case imgspecv1.MediaTypeImageLayer: layers[idx].MediaType = manifest.DockerV2SchemaLayerMediaTypeUncompressed diff --git a/vendor/github.com/containers/image/v5/internal/manifest/docker_schema2_list.go b/vendor/github.com/containers/image/v5/internal/manifest/docker_schema2_list.go index e98c5c99e..516ca7ac9 100644 --- a/vendor/github.com/containers/image/v5/internal/manifest/docker_schema2_list.go +++ b/vendor/github.com/containers/image/v5/internal/manifest/docker_schema2_list.go @@ -69,27 +69,71 @@ func (list *Schema2ListPublic) Instance(instanceDigest digest.Digest) (ListUpdat // UpdateInstances updates the sizes, digests, and media types of the manifests // which the list catalogs. 
-func (list *Schema2ListPublic) UpdateInstances(updates []ListUpdate) error { - if len(updates) != len(list.Manifests) { - return fmt.Errorf("incorrect number of update entries passed to Schema2List.UpdateInstances: expected %d, got %d", len(list.Manifests), len(updates)) +func (index *Schema2ListPublic) UpdateInstances(updates []ListUpdate) error { + editInstances := []ListEdit{} + for i, instance := range updates { + editInstances = append(editInstances, ListEdit{ + UpdateOldDigest: index.Manifests[i].Digest, + UpdateDigest: instance.Digest, + UpdateSize: instance.Size, + UpdateMediaType: instance.MediaType, + ListOperation: ListOpUpdate}) } - for i := range updates { - if err := updates[i].Digest.Validate(); err != nil { - return fmt.Errorf("update %d of %d passed to Schema2List.UpdateInstances contained an invalid digest: %w", i+1, len(updates), err) - } - list.Manifests[i].Digest = updates[i].Digest - if updates[i].Size < 0 { - return fmt.Errorf("update %d of %d passed to Schema2List.UpdateInstances had an invalid size (%d)", i+1, len(updates), updates[i].Size) - } - list.Manifests[i].Size = updates[i].Size - if updates[i].MediaType == "" { - return fmt.Errorf("update %d of %d passed to Schema2List.UpdateInstances had no media type (was %q)", i+1, len(updates), list.Manifests[i].MediaType) + return index.editInstances(editInstances) +} + +func (index *Schema2ListPublic) editInstances(editInstances []ListEdit) error { + addedEntries := []Schema2ManifestDescriptor{} + for i, editInstance := range editInstances { + switch editInstance.ListOperation { + case ListOpUpdate: + if err := editInstance.UpdateOldDigest.Validate(); err != nil { + return fmt.Errorf("Schema2List.EditInstances: Attempting to update %s which is an invalid digest: %w", editInstance.UpdateOldDigest, err) + } + if err := editInstance.UpdateDigest.Validate(); err != nil { + return fmt.Errorf("Schema2List.EditInstances: Modified digest %s is an invalid digest: %w", editInstance.UpdateDigest, err) + } + targetIndex := slices.IndexFunc(index.Manifests, func(m Schema2ManifestDescriptor) bool { + return m.Digest == editInstance.UpdateOldDigest + }) + if targetIndex == -1 { + return fmt.Errorf("Schema2List.EditInstances: digest %s not found", editInstance.UpdateOldDigest) + } + index.Manifests[targetIndex].Digest = editInstance.UpdateDigest + if editInstance.UpdateSize < 0 { + return fmt.Errorf("update %d of %d passed to Schema2List.UpdateInstances had an invalid size (%d)", i+1, len(editInstances), editInstance.UpdateSize) + } + index.Manifests[targetIndex].Size = editInstance.UpdateSize + if editInstance.UpdateMediaType == "" { + return fmt.Errorf("update %d of %d passed to Schema2List.UpdateInstances had no media type (was %q)", i+1, len(editInstances), index.Manifests[i].MediaType) + } + index.Manifests[targetIndex].MediaType = editInstance.UpdateMediaType + case ListOpAdd: + addInstance := Schema2ManifestDescriptor{ + Schema2Descriptor{Digest: editInstance.AddDigest, Size: editInstance.AddSize, MediaType: editInstance.AddMediaType}, + Schema2PlatformSpec{ + OS: editInstance.AddPlatform.OS, + Architecture: editInstance.AddPlatform.Architecture, + OSVersion: editInstance.AddPlatform.OSVersion, + OSFeatures: editInstance.AddPlatform.OSFeatures, + Variant: editInstance.AddPlatform.Variant, + }, + } + addedEntries = append(addedEntries, addInstance) + default: + return fmt.Errorf("internal error: invalid operation: %d", editInstance.ListOperation) } - list.Manifests[i].MediaType = updates[i].MediaType + } + if 
len(addedEntries) != 0 { + index.Manifests = append(index.Manifests, addedEntries...) } return nil } +func (index *Schema2List) EditInstances(editInstances []ListEdit) error { + return index.editInstances(editInstances) +} + func (list *Schema2ListPublic) ChooseInstanceByCompression(ctx *types.SystemContext, preferGzip types.OptionalBool) (digest.Digest, error) { // ChooseInstanceByCompression is same as ChooseInstance for schema2 manifest list. return list.ChooseInstance(ctx) diff --git a/vendor/github.com/containers/image/v5/internal/manifest/list.go b/vendor/github.com/containers/image/v5/internal/manifest/list.go index 07c7d85f4..8786324ea 100644 --- a/vendor/github.com/containers/image/v5/internal/manifest/list.go +++ b/vendor/github.com/containers/image/v5/internal/manifest/list.go @@ -3,6 +3,7 @@ package manifest import ( "fmt" + compression "github.com/containers/image/v5/pkg/compression/types" "github.com/containers/image/v5/types" digest "github.com/opencontainers/go-digest" imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1" @@ -55,6 +56,10 @@ type List interface { // SystemContext ( or for the current platform if the SystemContext doesn't specify any detail ) and preferGzip for compression which // when configured to OptionalBoolTrue and chooses best available compression when it is OptionalBoolFalse or left OptionalBoolUndefined. ChooseInstanceByCompression(ctx *types.SystemContext, preferGzip types.OptionalBool) (digest.Digest, error) + // Edit information about the list's instances. Contains Slice of ListEdit where each element + // is responsible for either Modifying or Adding a new instance to the Manifest. Operation is + // selected on the basis of configured ListOperation field. + EditInstances([]ListEdit) error } // ListUpdate includes the fields which a List's UpdateInstances() method will modify. @@ -65,6 +70,36 @@ type ListUpdate struct { MediaType string } +type ListOp int + +const ( + listOpInvalid ListOp = iota + ListOpAdd + ListOpUpdate +) + +// ListEdit includes the fields which a List's EditInstances() method will modify. +type ListEdit struct { + ListOperation ListOp + + // if Op == ListEditUpdate (basically the previous UpdateInstances). All fields must be set. + UpdateOldDigest digest.Digest + UpdateDigest digest.Digest + UpdateSize int64 + UpdateMediaType string + UpdateAffectAnnotations bool + UpdateAnnotations map[string]string + UpdateCompressionAlgorithms []compression.Algorithm + + // If Op = ListEditAdd. All fields must be set. + AddDigest digest.Digest + AddSize int64 + AddMediaType string + AddPlatform *imgspecv1.Platform + AddAnnotations map[string]string + AddCompressionAlgorithms []compression.Algorithm +} + // ListPublicFromBlob parses a list of manifests. // This is publicly visible as c/image/manifest.ListFromBlob. 
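The new ListEdit/EditInstances API above replaces positional UpdateInstances updates with digest-addressed edits. A small sketch of an update edit, mirroring what copyMultipleImages does earlier in this patch; since the list types live in c/image's internal/manifest package, this only compiles inside that module, and the package name here is hypothetical:

package manifestedit // illustrative only; compiles only within the c/image module

import (
	"fmt"

	internalManifest "github.com/containers/image/v5/internal/manifest"
	digest "github.com/opencontainers/go-digest"
)

// replaceInstance swaps one instance of a manifest list for an updated
// manifest, identified by its old digest rather than by position.
func replaceInstance(list internalManifest.List, old digest.Digest, updated []byte, mediaType string) error {
	edit := internalManifest.ListEdit{
		ListOperation:   internalManifest.ListOpUpdate,
		UpdateOldDigest: old,
		UpdateDigest:    digest.FromBytes(updated),
		UpdateSize:      int64(len(updated)),
		UpdateMediaType: mediaType,
	}
	if err := list.EditInstances([]internalManifest.ListEdit{edit}); err != nil {
		return fmt.Errorf("updating manifest list: %w", err)
	}
	return nil
}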
func ListPublicFromBlob(manifest []byte, manifestMIMEType string) (ListPublic, error) { diff --git a/vendor/github.com/containers/image/v5/internal/manifest/oci_index.go b/vendor/github.com/containers/image/v5/internal/manifest/oci_index.go index 8e911678e..fd251d951 100644 --- a/vendor/github.com/containers/image/v5/internal/manifest/oci_index.go +++ b/vendor/github.com/containers/image/v5/internal/manifest/oci_index.go @@ -7,6 +7,7 @@ import ( "runtime" platform "github.com/containers/image/v5/internal/pkg/platform" + compression "github.com/containers/image/v5/pkg/compression/types" "github.com/containers/image/v5/types" "github.com/opencontainers/go-digest" imgspec "github.com/opencontainers/image-spec/specs-go" @@ -22,7 +23,8 @@ const ( // That also suggests that this instance benefits from // Zstd compression, so it can be preferred by compatible consumers over instances that // use gzip, depending on their local policy. - OCI1InstanceAnnotationCompressionZSTD = "io.github.containers.compression.zstd" + OCI1InstanceAnnotationCompressionZSTD = "io.github.containers.compression.zstd" + OCI1InstanceAnnotationCompressionZSTDValue = "true" ) // OCI1IndexPublic is just an alias for the OCI index type, but one which we can @@ -64,26 +66,102 @@ func (index *OCI1IndexPublic) Instance(instanceDigest digest.Digest) (ListUpdate // UpdateInstances updates the sizes, digests, and media types of the manifests // which the list catalogs. func (index *OCI1IndexPublic) UpdateInstances(updates []ListUpdate) error { - if len(updates) != len(index.Manifests) { - return fmt.Errorf("incorrect number of update entries passed to OCI1Index.UpdateInstances: expected %d, got %d", len(index.Manifests), len(updates)) + editInstances := []ListEdit{} + for i, instance := range updates { + editInstances = append(editInstances, ListEdit{ + UpdateOldDigest: index.Manifests[i].Digest, + UpdateDigest: instance.Digest, + UpdateSize: instance.Size, + UpdateMediaType: instance.MediaType, + ListOperation: ListOpUpdate}) } - for i := range updates { - if err := updates[i].Digest.Validate(); err != nil { - return fmt.Errorf("update %d of %d passed to OCI1Index.UpdateInstances contained an invalid digest: %w", i+1, len(updates), err) - } - index.Manifests[i].Digest = updates[i].Digest - if updates[i].Size < 0 { - return fmt.Errorf("update %d of %d passed to OCI1Index.UpdateInstances had an invalid size (%d)", i+1, len(updates), updates[i].Size) + return index.editInstances(editInstances) +} + +func addCompressionAnnotations(compressionAlgorithms []compression.Algorithm, annotationsMap map[string]string) { + // TODO: This should also delete the algorithm if map already contains an algorithm and compressionAlgorithm + // list has a different algorithm. To do that, we would need to modify the callers to always provide a reliable + // and full compressionAlghorithms list. 
+ for _, algo := range compressionAlgorithms { + switch algo.Name() { + case compression.ZstdAlgorithmName: + annotationsMap[OCI1InstanceAnnotationCompressionZSTD] = OCI1InstanceAnnotationCompressionZSTDValue + default: + continue } - index.Manifests[i].Size = updates[i].Size - if updates[i].MediaType == "" { - return fmt.Errorf("update %d of %d passed to OCI1Index.UpdateInstances had no media type (was %q)", i+1, len(updates), index.Manifests[i].MediaType) + } +} + +func (index *OCI1IndexPublic) editInstances(editInstances []ListEdit) error { + addedEntries := []imgspecv1.Descriptor{} + updatedAnnotations := false + for i, editInstance := range editInstances { + switch editInstance.ListOperation { + case ListOpUpdate: + if err := editInstance.UpdateOldDigest.Validate(); err != nil { + return fmt.Errorf("OCI1Index.EditInstances: Attempting to update %s which is an invalid digest: %w", editInstance.UpdateOldDigest, err) + } + if err := editInstance.UpdateDigest.Validate(); err != nil { + return fmt.Errorf("OCI1Index.EditInstances: Modified digest %s is an invalid digest: %w", editInstance.UpdateDigest, err) + } + targetIndex := slices.IndexFunc(index.Manifests, func(m imgspecv1.Descriptor) bool { + return m.Digest == editInstance.UpdateOldDigest + }) + if targetIndex == -1 { + return fmt.Errorf("OCI1Index.EditInstances: digest %s not found", editInstance.UpdateOldDigest) + } + index.Manifests[targetIndex].Digest = editInstance.UpdateDigest + if editInstance.UpdateSize < 0 { + return fmt.Errorf("update %d of %d passed to OCI1Index.UpdateInstances had an invalid size (%d)", i+1, len(editInstances), editInstance.UpdateSize) + } + index.Manifests[targetIndex].Size = editInstance.UpdateSize + if editInstance.UpdateMediaType == "" { + return fmt.Errorf("update %d of %d passed to OCI1Index.UpdateInstances had no media type (was %q)", i+1, len(editInstances), index.Manifests[i].MediaType) + } + index.Manifests[targetIndex].MediaType = editInstance.UpdateMediaType + if editInstance.UpdateAnnotations != nil { + updatedAnnotations = true + if editInstance.UpdateAffectAnnotations { + index.Manifests[targetIndex].Annotations = maps.Clone(editInstance.UpdateAnnotations) + } else { + if index.Manifests[targetIndex].Annotations == nil { + index.Manifests[targetIndex].Annotations = map[string]string{} + } + maps.Copy(index.Manifests[targetIndex].Annotations, editInstance.UpdateAnnotations) + } + } + addCompressionAnnotations(editInstance.UpdateCompressionAlgorithms, index.Manifests[targetIndex].Annotations) + case ListOpAdd: + annotations := map[string]string{} + if editInstance.AddAnnotations != nil { + annotations = maps.Clone(editInstance.AddAnnotations) + } + addCompressionAnnotations(editInstance.AddCompressionAlgorithms, annotations) + addedEntries = append(addedEntries, imgspecv1.Descriptor{ + MediaType: editInstance.AddMediaType, + Size: editInstance.AddSize, + Digest: editInstance.AddDigest, + Platform: editInstance.AddPlatform, + Annotations: annotations}) + default: + return fmt.Errorf("internal error: invalid operation: %d", editInstance.ListOperation) } - index.Manifests[i].MediaType = updates[i].MediaType + } + if len(addedEntries) != 0 { + index.Manifests = append(index.Manifests, addedEntries...) 
+ } + if len(addedEntries) != 0 || updatedAnnotations { + slices.SortStableFunc(index.Manifests, func(a, b imgspecv1.Descriptor) bool { + return !instanceIsZstd(a) && instanceIsZstd(b) + }) } return nil } +func (index *OCI1Index) EditInstances(editInstances []ListEdit) error { + return index.editInstances(editInstances) +} + // instanceIsZstd returns true if instance is a zstd instance otherwise false. func instanceIsZstd(manifest imgspecv1.Descriptor) bool { if value, ok := manifest.Annotations[OCI1InstanceAnnotationCompressionZSTD]; ok && value == "true" { @@ -131,24 +209,20 @@ func (index *OCI1IndexPublic) chooseInstance(ctx *types.SystemContext, preferGzi for manifestIndex, d := range index.Manifests { candidate := instanceCandidate{platformIndex: math.MaxInt, manifestPosition: manifestIndex, isZstd: instanceIsZstd(d), digest: d.Digest} if d.Platform != nil { - foundPlatform := false - for platformIndex, wantedPlatform := range wantedPlatforms { - imagePlatform := imgspecv1.Platform{ - Architecture: d.Platform.Architecture, - OS: d.Platform.OS, - OSVersion: d.Platform.OSVersion, - OSFeatures: slices.Clone(d.Platform.OSFeatures), - Variant: d.Platform.Variant, - } - if platform.MatchesPlatform(imagePlatform, wantedPlatform) { - foundPlatform = true - candidate.platformIndex = platformIndex - break - } + imagePlatform := imgspecv1.Platform{ + Architecture: d.Platform.Architecture, + OS: d.Platform.OS, + OSVersion: d.Platform.OSVersion, + OSFeatures: slices.Clone(d.Platform.OSFeatures), + Variant: d.Platform.Variant, } - if !foundPlatform { + platformIndex := slices.IndexFunc(wantedPlatforms, func(wantedPlatform imgspecv1.Platform) bool { + return platform.MatchesPlatform(imagePlatform, wantedPlatform) + }) + if platformIndex == -1 { continue } + candidate.platformIndex = platformIndex } if bestMatch == nil || candidate.isPreferredOver(bestMatch, didPreferGzip) { bestMatch = &candidate diff --git a/vendor/github.com/containers/image/v5/internal/set/set.go b/vendor/github.com/containers/image/v5/internal/set/set.go index 5c7bcabef..3e777fe12 100644 --- a/vendor/github.com/containers/image/v5/internal/set/set.go +++ b/vendor/github.com/containers/image/v5/internal/set/set.go @@ -24,11 +24,11 @@ func NewWithValues[E comparable](values ...E) *Set[E] { return s } -func (s Set[E]) Add(v E) { +func (s *Set[E]) Add(v E) { s.m[v] = struct{}{} // Possibly writing the same struct{}{} presence marker again. } -func (s Set[E]) Delete(v E) { +func (s *Set[E]) Delete(v E) { delete(s.m, v) } diff --git a/vendor/github.com/containers/image/v5/manifest/oci.go b/vendor/github.com/containers/image/v5/manifest/oci.go index eb2354768..a70470d99 100644 --- a/vendor/github.com/containers/image/v5/manifest/oci.go +++ b/vendor/github.com/containers/image/v5/manifest/oci.go @@ -42,7 +42,12 @@ type OCI1 struct { // useful for validation anyway. 
func SupportedOCI1MediaType(m string) error { switch m { - case imgspecv1.MediaTypeDescriptor, imgspecv1.MediaTypeImageConfig, imgspecv1.MediaTypeImageLayer, imgspecv1.MediaTypeImageLayerGzip, imgspecv1.MediaTypeImageLayerNonDistributable, imgspecv1.MediaTypeImageLayerNonDistributableGzip, imgspecv1.MediaTypeImageLayerNonDistributableZstd, imgspecv1.MediaTypeImageLayerZstd, imgspecv1.MediaTypeImageManifest, imgspecv1.MediaTypeLayoutHeader, ociencspec.MediaTypeLayerEnc, ociencspec.MediaTypeLayerGzipEnc: + case imgspecv1.MediaTypeDescriptor, imgspecv1.MediaTypeImageConfig, + imgspecv1.MediaTypeImageLayer, imgspecv1.MediaTypeImageLayerGzip, imgspecv1.MediaTypeImageLayerZstd, + imgspecv1.MediaTypeImageLayerNonDistributable, imgspecv1.MediaTypeImageLayerNonDistributableGzip, imgspecv1.MediaTypeImageLayerNonDistributableZstd, //nolint:staticcheck // NonDistributable layers are deprecated, but we want to continue to support manipulating pre-existing images. + imgspecv1.MediaTypeImageManifest, + imgspecv1.MediaTypeLayoutHeader, + ociencspec.MediaTypeLayerEnc, ociencspec.MediaTypeLayerGzipEnc: return nil default: return fmt.Errorf("unsupported OCIv1 media type: %q", m) @@ -102,9 +107,9 @@ func (m *OCI1) LayerInfos() []LayerInfo { var oci1CompressionMIMETypeSets = []compressionMIMETypeSet{ { - mtsUncompressed: imgspecv1.MediaTypeImageLayerNonDistributable, - compressiontypes.GzipAlgorithmName: imgspecv1.MediaTypeImageLayerNonDistributableGzip, - compressiontypes.ZstdAlgorithmName: imgspecv1.MediaTypeImageLayerNonDistributableZstd, + mtsUncompressed: imgspecv1.MediaTypeImageLayerNonDistributable, //nolint:staticcheck // NonDistributable layers are deprecated, but we want to continue to support manipulating pre-existing images. + compressiontypes.GzipAlgorithmName: imgspecv1.MediaTypeImageLayerNonDistributableGzip, //nolint:staticcheck // NonDistributable layers are deprecated, but we want to continue to support manipulating pre-existing images. + compressiontypes.ZstdAlgorithmName: imgspecv1.MediaTypeImageLayerNonDistributableZstd, //nolint:staticcheck // NonDistributable layers are deprecated, but we want to continue to support manipulating pre-existing images. }, { mtsUncompressed: imgspecv1.MediaTypeImageLayer, @@ -166,7 +171,8 @@ func getEncryptedMediaType(mediatype string) (string, error) { } unsuffixedMediatype := strings.Split(mediatype, "+")[0] switch unsuffixedMediatype { - case DockerV2Schema2LayerMediaType, imgspecv1.MediaTypeImageLayer, imgspecv1.MediaTypeImageLayerNonDistributable: + case DockerV2Schema2LayerMediaType, imgspecv1.MediaTypeImageLayer, + imgspecv1.MediaTypeImageLayerNonDistributable: //nolint:staticcheck // NonDistributable layers are deprecated, but we want to continue to support manipulating pre-existing images. 
return mediatype + "+encrypted", nil } diff --git a/vendor/github.com/containers/image/v5/oci/layout/oci_transport.go b/vendor/github.com/containers/image/v5/oci/layout/oci_transport.go index 4a4ab9b2c..6586b8440 100644 --- a/vendor/github.com/containers/image/v5/oci/layout/oci_transport.go +++ b/vendor/github.com/containers/image/v5/oci/layout/oci_transport.go @@ -188,14 +188,18 @@ func (ref ociReference) getManifestDescriptor() (imgspecv1.Descriptor, error) { return index.Manifests[0], nil } else { // if image specified, look through all manifests for a match + var unsupportedMIMETypes []string for _, md := range index.Manifests { - if md.MediaType != imgspecv1.MediaTypeImageManifest && md.MediaType != imgspecv1.MediaTypeImageIndex { - continue - } if refName, ok := md.Annotations[imgspecv1.AnnotationRefName]; ok && refName == ref.image { - return md, nil + if md.MediaType == imgspecv1.MediaTypeImageManifest || md.MediaType == imgspecv1.MediaTypeImageIndex { + return md, nil + } + unsupportedMIMETypes = append(unsupportedMIMETypes, md.MediaType) } } + if len(unsupportedMIMETypes) != 0 { + return imgspecv1.Descriptor{}, fmt.Errorf("reference %q matches unsupported manifest MIME types %q", ref.image, unsupportedMIMETypes) + } } return imgspecv1.Descriptor{}, ImageNotFoundError{ref} } diff --git a/vendor/github.com/containers/image/v5/openshift/openshift-copies.go b/vendor/github.com/containers/image/v5/openshift/openshift-copies.go index 0b737f020..c6498f6ca 100644 --- a/vendor/github.com/containers/image/v5/openshift/openshift-copies.go +++ b/vendor/github.com/containers/image/v5/openshift/openshift-copies.go @@ -17,8 +17,8 @@ import ( "strings" "time" + "dario.cat/mergo" "github.com/containers/storage/pkg/homedir" - "github.com/imdario/mergo" "github.com/sirupsen/logrus" "golang.org/x/exp/slices" "gopkg.in/yaml.v3" @@ -957,8 +957,6 @@ func tlsConfigFor(c *restConfig) (*tls.Config, error) { } tlsConfig := &tls.Config{ - // Change default from SSLv3 to TLSv1.0 (because of POODLE vulnerability) - MinVersion: tls.VersionTLS10, InsecureSkipVerify: c.Insecure, } diff --git a/vendor/github.com/containers/image/v5/pkg/docker/config/config.go b/vendor/github.com/containers/image/v5/pkg/docker/config/config.go index 0e3003cec..2e79d0ffb 100644 --- a/vendor/github.com/containers/image/v5/pkg/docker/config/config.go +++ b/vendor/github.com/containers/image/v5/pkg/docker/config/config.go @@ -48,9 +48,9 @@ var ( ErrNotSupported = errors.New("not supported") ) -// authPath combines a path to a file with container registry access keys, -// along with expected properties of that path (currently just whether it's) -// legacy format or not. +// authPath combines a path to a file with container registry credentials, +// along with expected properties of that path (currently just whether it's +// legacy format or not). type authPath struct { path string legacyFormat bool @@ -87,12 +87,12 @@ func SetCredentials(sys *types.SystemContext, key, username, password string) (s switch helper { // Special-case the built-in helpers for auth files. 
case sysregistriesv2.AuthenticationFileHelper: - desc, err = modifyJSON(sys, func(auths *dockerConfigFile) (bool, string, error) { - if ch, exists := auths.CredHelpers[key]; exists { + desc, err = modifyJSON(sys, func(fileContents *dockerConfigFile) (bool, string, error) { + if ch, exists := fileContents.CredHelpers[key]; exists { if isNamespaced { return false, "", unsupportedNamespaceErr(ch) } - desc, err := setAuthToCredHelper(ch, key, username, password) + desc, err := setCredsInCredHelper(ch, key, username, password) if err != nil { return false, "", err } @@ -100,7 +100,7 @@ func SetCredentials(sys *types.SystemContext, key, username, password string) (s } creds := base64.StdEncoding.EncodeToString([]byte(username + ":" + password)) newCreds := dockerAuthConfig{Auth: creds} - auths.AuthConfigs[key] = newCreds + fileContents.AuthConfigs[key] = newCreds return true, "", nil }) // External helpers. @@ -108,7 +108,7 @@ func SetCredentials(sys *types.SystemContext, key, username, password string) (s if isNamespaced { err = unsupportedNamespaceErr(helper) } else { - desc, err = setAuthToCredHelper(helper, key, username, password) + desc, err = setCredsInCredHelper(helper, key, username, password) } } if err != nil { @@ -156,17 +156,17 @@ func GetAllCredentials(sys *types.SystemContext) (map[string]types.DockerAuthCon case sysregistriesv2.AuthenticationFileHelper: for _, path := range getAuthFilePaths(sys, homedir.Get()) { // parse returns an empty map in case the path doesn't exist. - auths, err := path.parse() + fileContents, err := path.parse() if err != nil { return nil, fmt.Errorf("reading JSON file %q: %w", path.path, err) } // Credential helpers in the auth file have a // direct mapping to a registry, so we can just // walk the map. - for registry := range auths.CredHelpers { + for registry := range fileContents.CredHelpers { allKeys.Add(registry) } - for key := range auths.AuthConfigs { + for key := range fileContents.AuthConfigs { key := normalizeAuthFileKey(key, path.legacyFormat) if key == normalizedDockerIORegistry { key = "docker.io" @@ -176,7 +176,7 @@ func GetAllCredentials(sys *types.SystemContext) (map[string]types.DockerAuthCon } // External helpers. default: - creds, err := listAuthsFromCredHelper(helper) + creds, err := listCredsInCredHelper(helper) if err != nil { logrus.Debugf("Error listing credentials stored in credential helper %s: %v", helper, err) if errors.Is(err, exec.ErrNotFound) { @@ -193,19 +193,19 @@ func GetAllCredentials(sys *types.SystemContext) (map[string]types.DockerAuthCon // Now use `GetCredentials` to the specific auth configs for each // previously listed registry. - authConfigs := make(map[string]types.DockerAuthConfig) + allCreds := make(map[string]types.DockerAuthConfig) for _, key := range allKeys.Values() { - authConf, err := GetCredentials(sys, key) + creds, err := GetCredentials(sys, key) if err != nil { // Note: we rely on the logging in `GetCredentials`. return nil, err } - if authConf != (types.DockerAuthConfig{}) { - authConfigs[key] = authConf + if creds != (types.DockerAuthConfig{}) { + allCreds[key] = creds } } - return authConfigs, nil + return allCreds, nil } // getAuthFilePaths returns a slice of authPaths based on the system context @@ -285,13 +285,13 @@ func getCredentialsWithHomeDir(sys *types.SystemContext, key, homeDir string) (t // Anonymous function to query credentials from auth files. 
getCredentialsFromAuthFiles := func() (types.DockerAuthConfig, string, error) { for _, path := range getAuthFilePaths(sys, homeDir) { - authConfig, err := findCredentialsInFile(key, registry, path) + creds, err := findCredentialsInFile(key, registry, path) if err != nil { return types.DockerAuthConfig{}, "", err } - if authConfig != (types.DockerAuthConfig{}) { - return authConfig, path.path, nil + if creds != (types.DockerAuthConfig{}) { + return creds, path.path, nil } } return types.DockerAuthConfig{}, "", nil @@ -320,7 +320,7 @@ func getCredentialsWithHomeDir(sys *types.SystemContext, key, homeDir string) (t // This intentionally uses "registry", not "key"; we don't support namespaced // credentials in helpers, but a "registry" is a valid parent of "key". helperKey = registry - creds, err = getAuthFromCredHelper(helper, registry) + creds, err = getCredsFromCredHelper(helper, registry) } if err != nil { logrus.Debugf("Error looking up credentials for %s in credential helper %s: %v", helperKey, helper, err) @@ -360,14 +360,14 @@ func GetAuthentication(sys *types.SystemContext, key string) (string, string, er // getAuthenticationWithHomeDir is an internal implementation detail of GetAuthentication, // it exists only to allow testing it with an artificial home directory. func getAuthenticationWithHomeDir(sys *types.SystemContext, key, homeDir string) (string, string, error) { - auth, err := getCredentialsWithHomeDir(sys, key, homeDir) + creds, err := getCredentialsWithHomeDir(sys, key, homeDir) if err != nil { return "", "", err } - if auth.IdentityToken != "" { + if creds.IdentityToken != "" { return "", "", fmt.Errorf("non-empty identity token found and this API doesn't support it: %w", ErrNotSupported) } - return auth.Username, auth.Password, nil + return creds.Username, creds.Password, nil } // RemoveAuthentication removes credentials for `key` from all possible @@ -393,7 +393,7 @@ func RemoveAuthentication(sys *types.SystemContext, key string) error { logrus.Debugf("Not removing credentials because namespaced keys are not supported for the credential helper: %s", helper) return } - err := deleteAuthFromCredHelper(helper, key) + err := deleteCredsFromCredHelper(helper, key) if err == nil { logrus.Debugf("Credentials for %q were deleted from credential helper %s", key, helper) isLoggedIn = true @@ -411,13 +411,13 @@ func RemoveAuthentication(sys *types.SystemContext, key string) error { switch helper { // Special-case the built-in helper for auth files. case sysregistriesv2.AuthenticationFileHelper: - _, err = modifyJSON(sys, func(auths *dockerConfigFile) (bool, string, error) { - if innerHelper, exists := auths.CredHelpers[key]; exists { + _, err = modifyJSON(sys, func(fileContents *dockerConfigFile) (bool, string, error) { + if innerHelper, exists := fileContents.CredHelpers[key]; exists { removeFromCredHelper(innerHelper) } - if _, ok := auths.AuthConfigs[key]; ok { + if _, ok := fileContents.AuthConfigs[key]; ok { isLoggedIn = true - delete(auths.AuthConfigs, key) + delete(fileContents.AuthConfigs, key) } return true, "", multiErr }) @@ -454,23 +454,23 @@ func RemoveAllAuthentication(sys *types.SystemContext) error { switch helper { // Special-case the built-in helper for auth files. 
case sysregistriesv2.AuthenticationFileHelper: - _, err = modifyJSON(sys, func(auths *dockerConfigFile) (bool, string, error) { - for registry, helper := range auths.CredHelpers { + _, err = modifyJSON(sys, func(fileContents *dockerConfigFile) (bool, string, error) { + for registry, helper := range fileContents.CredHelpers { // Helpers in auth files are expected // to exist, so no special treatment // for them. - if err := deleteAuthFromCredHelper(helper, registry); err != nil { + if err := deleteCredsFromCredHelper(helper, registry); err != nil { return false, "", err } } - auths.CredHelpers = make(map[string]string) - auths.AuthConfigs = make(map[string]dockerAuthConfig) + fileContents.CredHelpers = make(map[string]string) + fileContents.AuthConfigs = make(map[string]dockerAuthConfig) return true, "", nil }) // External helpers. default: var creds map[string]string - creds, err = listAuthsFromCredHelper(helper) + creds, err = listCredsInCredHelper(helper) if err != nil { if errors.Is(err, exec.ErrNotFound) { // It's okay if the helper doesn't exist. @@ -480,7 +480,7 @@ func RemoveAllAuthentication(sys *types.SystemContext) error { } } for registry := range creds { - err = deleteAuthFromCredHelper(helper, registry) + err = deleteCredsFromCredHelper(helper, registry) if err != nil { break } @@ -497,7 +497,7 @@ func RemoveAllAuthentication(sys *types.SystemContext) error { return multiErr } -func listAuthsFromCredHelper(credHelper string) (map[string]string, error) { +func listCredsInCredHelper(credHelper string) (map[string]string, error) { helperName := fmt.Sprintf("docker-credential-%s", credHelper) p := helperclient.NewShellProgramFunc(helperName) return helperclient.List(p) @@ -543,40 +543,40 @@ func getPathToAuthWithOS(sys *types.SystemContext, goOS string) (authPath, bool, return newAuthPathDefault(fmt.Sprintf(defaultPerUIDPathFormat, os.Getuid())), false, nil } -// parse unmarshals the authentications stored in the auth.json file and returns it +// parse unmarshals the credentials stored in the auth.json file and returns it // or returns an empty dockerConfigFile data structure if auth.json does not exist // if the file exists and is empty, this function returns an error. 
func (path authPath) parse() (dockerConfigFile, error) { - var auths dockerConfigFile + var fileContents dockerConfigFile raw, err := os.ReadFile(path.path) if err != nil { if os.IsNotExist(err) { - auths.AuthConfigs = map[string]dockerAuthConfig{} - return auths, nil + fileContents.AuthConfigs = map[string]dockerAuthConfig{} + return fileContents, nil } return dockerConfigFile{}, err } if path.legacyFormat { - if err = json.Unmarshal(raw, &auths.AuthConfigs); err != nil { + if err = json.Unmarshal(raw, &fileContents.AuthConfigs); err != nil { return dockerConfigFile{}, fmt.Errorf("unmarshaling JSON at %q: %w", path.path, err) } - return auths, nil + return fileContents, nil } - if err = json.Unmarshal(raw, &auths); err != nil { + if err = json.Unmarshal(raw, &fileContents); err != nil { return dockerConfigFile{}, fmt.Errorf("unmarshaling JSON at %q: %w", path.path, err) } - if auths.AuthConfigs == nil { - auths.AuthConfigs = map[string]dockerAuthConfig{} + if fileContents.AuthConfigs == nil { + fileContents.AuthConfigs = map[string]dockerAuthConfig{} } - if auths.CredHelpers == nil { - auths.CredHelpers = make(map[string]string) + if fileContents.CredHelpers == nil { + fileContents.CredHelpers = make(map[string]string) } - return auths, nil + return fileContents, nil } // modifyJSON finds an auth.json file, calls editor on the contents, and @@ -585,7 +585,7 @@ func (path authPath) parse() (dockerConfigFile, error) { // // The editor may also return a human-readable description of the updated location; if it is "", // the file itself is used. -func modifyJSON(sys *types.SystemContext, editor func(auths *dockerConfigFile) (bool, string, error)) (string, error) { +func modifyJSON(sys *types.SystemContext, editor func(fileContents *dockerConfigFile) (bool, string, error)) (string, error) { path, _, err := getPathToAuth(sys) if err != nil { return "", err @@ -599,17 +599,17 @@ func modifyJSON(sys *types.SystemContext, editor func(auths *dockerConfigFile) ( return "", err } - auths, err := path.parse() + fileContents, err := path.parse() if err != nil { return "", fmt.Errorf("reading JSON file %q: %w", path.path, err) } - updated, description, err := editor(&auths) + updated, description, err := editor(&fileContents) if err != nil { return "", fmt.Errorf("updating %q: %w", path.path, err) } if updated { - newData, err := json.MarshalIndent(auths, "", "\t") + newData, err := json.MarshalIndent(fileContents, "", "\t") if err != nil { return "", fmt.Errorf("marshaling JSON %q: %w", path.path, err) } @@ -625,7 +625,7 @@ func modifyJSON(sys *types.SystemContext, editor func(auths *dockerConfigFile) ( return description, nil } -func getAuthFromCredHelper(credHelper, registry string) (types.DockerAuthConfig, error) { +func getCredsFromCredHelper(credHelper, registry string) (types.DockerAuthConfig, error) { helperName := fmt.Sprintf("docker-credential-%s", credHelper) p := helperclient.NewShellProgramFunc(helperName) creds, err := helperclient.Get(p, registry) @@ -650,9 +650,9 @@ func getAuthFromCredHelper(credHelper, registry string) (types.DockerAuthConfig, } } -// setAuthToCredHelper stores (username, password) for registry in credHelper. +// setCredsInCredHelper stores (username, password) for registry in credHelper. // Returns a human-readable description of the destination, to be returned by SetCredentials. 
-func setAuthToCredHelper(credHelper, registry, username, password string) (string, error) { +func setCredsInCredHelper(credHelper, registry, username, password string) (string, error) { helperName := fmt.Sprintf("docker-credential-%s", credHelper) p := helperclient.NewShellProgramFunc(helperName) creds := &credentials.Credentials{ @@ -666,7 +666,7 @@ func setAuthToCredHelper(credHelper, registry, username, password string) (strin return fmt.Sprintf("credential helper: %s", credHelper), nil } -func deleteAuthFromCredHelper(credHelper, registry string) error { +func deleteCredsFromCredHelper(credHelper, registry string) error { helperName := fmt.Sprintf("docker-credential-%s", credHelper) p := helperclient.NewShellProgramFunc(helperName) return helperclient.Erase(p, registry) @@ -675,7 +675,7 @@ func deleteAuthFromCredHelper(credHelper, registry string) error { // findCredentialsInFile looks for credentials matching "key" // (which is "registry" or a namespace in "registry") in "path". func findCredentialsInFile(key, registry string, path authPath) (types.DockerAuthConfig, error) { - auths, err := path.parse() + fileContents, err := path.parse() if err != nil { return types.DockerAuthConfig{}, fmt.Errorf("reading JSON file %q: %w", path.path, err) } @@ -683,9 +683,9 @@ func findCredentialsInFile(key, registry string, path authPath) (types.DockerAut // First try cred helpers. They should always be normalized. // This intentionally uses "registry", not "key"; we don't support namespaced // credentials in helpers. - if ch, exists := auths.CredHelpers[registry]; exists { + if ch, exists := fileContents.CredHelpers[registry]; exists { logrus.Debugf("Looking up in credential helper %s based on credHelpers entry in %s", ch, path.path) - return getAuthFromCredHelper(ch, registry) + return getCredsFromCredHelper(ch, registry) } // Support sub-registry namespaces in auth. @@ -701,7 +701,7 @@ func findCredentialsInFile(key, registry string, path authPath) (types.DockerAut // Repo or namespace keys are only supported as exact matches. For registry // keys we prefer exact matches as well. for _, key := range keys { - if val, exists := auths.AuthConfigs[key]; exists { + if val, exists := fileContents.AuthConfigs[key]; exists { return decodeDockerAuth(path.path, key, val) } } @@ -715,7 +715,7 @@ func findCredentialsInFile(key, registry string, path authPath) (types.DockerAut // The docker.io registry still uses the /v1/ key with a special host name, // so account for that as well. 
registry = normalizeRegistry(registry) - for k, v := range auths.AuthConfigs { + for k, v := range fileContents.AuthConfigs { if normalizeAuthFileKey(k, path.legacyFormat) == registry { return decodeDockerAuth(path.path, k, v) } diff --git a/vendor/github.com/containers/image/v5/sif/src.go b/vendor/github.com/containers/image/v5/sif/src.go index b645f80dd..1f6ab7f3b 100644 --- a/vendor/github.com/containers/image/v5/sif/src.go +++ b/vendor/github.com/containers/image/v5/sif/src.go @@ -96,9 +96,11 @@ func newImageSource(ctx context.Context, sys *types.SystemContext, ref sifRefere created := sifImg.ModifiedAt() config := imgspecv1.Image{ - Created: &created, - Architecture: sifImg.PrimaryArch(), - OS: "linux", + Created: &created, + Platform: imgspecv1.Platform{ + Architecture: sifImg.PrimaryArch(), + OS: "linux", + }, Config: imgspecv1.ImageConfig{ Cmd: commandLine, }, @@ -180,7 +182,7 @@ func (s *sifImageSource) Close() error { func (s *sifImageSource) GetBlob(ctx context.Context, info types.BlobInfo, cache types.BlobInfoCache) (io.ReadCloser, int64, error) { switch info.Digest { case s.configDigest: - return io.NopCloser(bytes.NewBuffer(s.config)), int64(len(s.config)), nil + return io.NopCloser(bytes.NewReader(s.config)), int64(len(s.config)), nil case s.layerDigest: reader, err := os.Open(s.layerFile) if err != nil { diff --git a/vendor/github.com/containers/image/v5/tarball/tarball_src.go b/vendor/github.com/containers/image/v5/tarball/tarball_src.go index 0fb3a8393..6f9bfaf75 100644 --- a/vendor/github.com/containers/image/v5/tarball/tarball_src.go +++ b/vendor/github.com/containers/image/v5/tarball/tarball_src.go @@ -19,7 +19,6 @@ import ( imgspecs "github.com/opencontainers/image-spec/specs-go" imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1" "golang.org/x/exp/maps" - "golang.org/x/exp/slices" ) type tarballImageSource struct { @@ -29,42 +28,46 @@ type tarballImageSource struct { impl.DoesNotAffectLayerInfosForCopy stubs.NoGetBlobAtInitialize - reference tarballReference - filenames []string - diffIDs []digest.Digest - diffSizes []int64 - blobIDs []digest.Digest - blobSizes []int64 - blobTypes []string - config []byte - configID digest.Digest - configSize int64 - manifest []byte + reference tarballReference + blobs map[digest.Digest]tarballBlob + manifest []byte +} + +// tarballBlob is a blob that tarballImagSource can return by GetBlob. +type tarballBlob struct { + contents []byte // or nil to read from filename below + filename string // valid if contents == nil + size int64 } func (r *tarballReference) NewImageSource(ctx context.Context, sys *types.SystemContext) (types.ImageSource, error) { - // Gather up the digests, sizes, and date information for all of the files. - filenames := []string{} + // Pick up the layer comment from the configuration's history list, if one is set. + comment := "imported from tarball" + if len(r.config.History) > 0 && r.config.History[0].Comment != "" { + comment = r.config.History[0].Comment + } + + // Gather up the digests, sizes, and history information for all of the files. 
+ blobs := map[digest.Digest]tarballBlob{} diffIDs := []digest.Digest{} - diffSizes := []int64{} - blobIDs := []digest.Digest{} - blobSizes := []int64{} - blobTimes := []time.Time{} - blobTypes := []string{} + created := time.Time{} + history := []imgspecv1.History{} + layerDescriptors := []imgspecv1.Descriptor{} for _, filename := range r.filenames { - var file *os.File - var err error - var blobSize int64 - var blobTime time.Time var reader io.Reader + var blobTime time.Time + var blob tarballBlob if filename == "-" { - blobSize = int64(len(r.stdin)) - blobTime = time.Now() reader = bytes.NewReader(r.stdin) + blobTime = time.Now() + blob = tarballBlob{ + contents: r.stdin, + size: int64(len(r.stdin)), + } } else { - file, err = os.Open(filename) + file, err := os.Open(filename) if err != nil { - return nil, fmt.Errorf("error opening %q for reading: %w", filename, err) + return nil, err } defer file.Close() reader = file @@ -72,8 +75,11 @@ func (r *tarballReference) NewImageSource(ctx context.Context, sys *types.System if err != nil { return nil, fmt.Errorf("error reading size of %q: %w", filename, err) } - blobSize = fileinfo.Size() blobTime = fileinfo.ModTime() + blob = tarballBlob{ + filename: filename, + size: fileinfo.Size(), + } } // Default to assuming the layer is compressed. @@ -96,8 +102,7 @@ func (r *tarballReference) NewImageSource(ctx context.Context, sys *types.System uncompressed = nil } // TODO: This can take quite some time, and should ideally be cancellable using ctx.Done(). - n, err := io.Copy(io.Discard, reader) - if err != nil { + if _, err := io.Copy(io.Discard, reader); err != nil { return nil, fmt.Errorf("error reading %q: %v", filename, err) } if uncompressed != nil { @@ -105,38 +110,26 @@ func (r *tarballReference) NewImageSource(ctx context.Context, sys *types.System } // Grab our uncompressed and possibly-compressed digests and sizes. - filenames = append(filenames, filename) - diffIDs = append(diffIDs, diffIDdigester.Digest()) - diffSizes = append(diffSizes, n) - blobIDs = append(blobIDs, blobIDdigester.Digest()) - blobSizes = append(blobSizes, blobSize) - blobTimes = append(blobTimes, blobTime) - blobTypes = append(blobTypes, layerType) - } + diffID := diffIDdigester.Digest() + blobID := blobIDdigester.Digest() + diffIDs = append(diffIDs, diffID) + blobs[blobID] = blob - // Build the rootfs and history for the configuration blob. - rootfs := imgspecv1.RootFS{ - Type: "layers", - DiffIDs: diffIDs, - } - created := time.Time{} - history := []imgspecv1.History{} - // Pick up the layer comment from the configuration's history list, if one is set. - comment := "imported from tarball" - if len(r.config.History) > 0 && r.config.History[0].Comment != "" { - comment = r.config.History[0].Comment - } - for i := range diffIDs { - createdBy := fmt.Sprintf("/bin/sh -c #(nop) ADD file:%s in %c", diffIDs[i].Hex(), os.PathSeparator) history = append(history, imgspecv1.History{ - Created: &blobTimes[i], - CreatedBy: createdBy, + Created: &blobTime, + CreatedBy: fmt.Sprintf("/bin/sh -c #(nop) ADD file:%s in %c", diffID.Hex(), os.PathSeparator), Comment: comment, }) // Use the mtime of the most recently modified file as the image's creation time. - if created.Before(blobTimes[i]) { - created = blobTimes[i] + if created.Before(blobTime) { + created = blobTime } + + layerDescriptors = append(layerDescriptors, imgspecv1.Descriptor{ + Digest: blobID, + Size: blob.size, + MediaType: layerType, + }) } // Pick up other defaults from the config in the reference. 
@@ -150,7 +143,10 @@ func (r *tarballReference) NewImageSource(ctx context.Context, sys *types.System if config.OS == "" { config.OS = runtime.GOOS } - config.RootFS = rootfs + config.RootFS = imgspecv1.RootFS{ + Type: "layers", + DiffIDs: diffIDs, + } config.History = history // Encode and digest the image configuration blob. @@ -159,24 +155,19 @@ func (r *tarballReference) NewImageSource(ctx context.Context, sys *types.System return nil, fmt.Errorf("error generating configuration blob for %q: %v", strings.Join(r.filenames, separator), err) } configID := digest.Canonical.FromBytes(configBytes) - configSize := int64(len(configBytes)) - - // Populate a manifest with the configuration blob and the file as the single layer. - layerDescriptors := []imgspecv1.Descriptor{} - for i := range blobIDs { - layerDescriptors = append(layerDescriptors, imgspecv1.Descriptor{ - Digest: blobIDs[i], - Size: blobSizes[i], - MediaType: blobTypes[i], - }) + blobs[configID] = tarballBlob{ + contents: configBytes, + size: int64(len(configBytes)), } + + // Populate a manifest with the configuration blob and the layers. manifest := imgspecv1.Manifest{ Versioned: imgspecs.Versioned{ SchemaVersion: 2, }, Config: imgspecv1.Descriptor{ Digest: configID, - Size: configSize, + Size: int64(len(configBytes)), MediaType: imgspecv1.MediaTypeImageConfig, }, Layers: layerDescriptors, @@ -196,17 +187,9 @@ func (r *tarballReference) NewImageSource(ctx context.Context, sys *types.System }), NoGetBlobAtInitialize: stubs.NoGetBlobAt(r), - reference: *r, - filenames: filenames, - diffIDs: diffIDs, - diffSizes: diffSizes, - blobIDs: blobIDs, - blobSizes: blobSizes, - blobTypes: blobTypes, - config: configBytes, - configID: configID, - configSize: configSize, - manifest: manifestBytes, + reference: *r, + blobs: blobs, + manifest: manifestBytes, } src.Compat = impl.AddCompat(src) @@ -221,24 +204,18 @@ func (is *tarballImageSource) Close() error { // The Digest field in BlobInfo is guaranteed to be provided, Size may be -1 and MediaType may be optionally provided. // May update BlobInfoCache, preferably after it knows for certain that a blob truly exists at a specific location. func (is *tarballImageSource) GetBlob(ctx context.Context, blobinfo types.BlobInfo, cache types.BlobInfoCache) (io.ReadCloser, int64, error) { - // We should only be asked about things in the manifest. Maybe the configuration blob. - if blobinfo.Digest == is.configID { - return io.NopCloser(bytes.NewBuffer(is.config)), is.configSize, nil - } - // Maybe one of the layer blobs. - i := slices.Index(is.blobIDs, blobinfo.Digest) - if i == -1 { + blob, ok := is.blobs[blobinfo.Digest] + if !ok { return nil, -1, fmt.Errorf("no blob with digest %q found", blobinfo.Digest.String()) } - // We want to read that layer: open the file or memory block and hand it back. - if is.filenames[i] == "-" { - return io.NopCloser(bytes.NewBuffer(is.reference.stdin)), int64(len(is.reference.stdin)), nil + if blob.contents != nil { + return io.NopCloser(bytes.NewReader(blob.contents)), int64(len(blob.contents)), nil } - reader, err := os.Open(is.filenames[i]) + reader, err := os.Open(blob.filename) if err != nil { - return nil, -1, fmt.Errorf("error opening %q: %v", is.filenames[i], err) + return nil, -1, err } - return reader, is.blobSizes[i], nil + return reader, blob.size, nil } // GetManifest returns the image's manifest along with its MIME type (which may be empty when it can't be determined but the manifest is available). 
diff --git a/vendor/github.com/containers/image/v5/types/types.go b/vendor/github.com/containers/image/v5/types/types.go index 6ea414b86..33adb5f1d 100644 --- a/vendor/github.com/containers/image/v5/types/types.go +++ b/vendor/github.com/containers/image/v5/types/types.go @@ -585,9 +585,9 @@ type SystemContext struct { // resolving to Docker Hub in the Docker-compatible REST API of Podman; it should never be used outside this // specific context. PodmanOnlyShortNamesIgnoreRegistriesConfAndForceDockerHub bool - // If not "", overrides the default path for the authentication file, but only new format files + // If not "", overrides the default path for the registry authentication file, but only new format files AuthFilePath string - // if not "", overrides the default path for the authentication file, but with the legacy format; + // if not "", overrides the default path for the registry authentication file, but with the legacy format; // the code currently will by default look for legacy format files like .dockercfg in the $HOME dir; // but in addition to the home dir, openshift may mount .dockercfg files (via secret mount) // in locations other than the home dir; openshift components should then set this field in those cases; diff --git a/vendor/github.com/containers/image/v5/version/version.go b/vendor/github.com/containers/image/v5/version/version.go index 16a6d5816..3c8fc094d 100644 --- a/vendor/github.com/containers/image/v5/version/version.go +++ b/vendor/github.com/containers/image/v5/version/version.go @@ -6,9 +6,9 @@ const ( // VersionMajor is for an API incompatible changes VersionMajor = 5 // VersionMinor is for functionality in a backwards-compatible manner - VersionMinor = 25 + VersionMinor = 26 // VersionPatch is for backwards-compatible bug fixes - VersionPatch = 0 + VersionPatch = 1 // VersionDev indicates development branch. Releases will be empty string. VersionDev = "" diff --git a/vendor/github.com/go-logr/logr/.golangci.yaml b/vendor/github.com/go-logr/logr/.golangci.yaml index 94ff801df..0cffafa7b 100644 --- a/vendor/github.com/go-logr/logr/.golangci.yaml +++ b/vendor/github.com/go-logr/logr/.golangci.yaml @@ -6,7 +6,6 @@ linters: disable-all: true enable: - asciicheck - - deadcode - errcheck - forcetypeassert - gocritic @@ -18,10 +17,8 @@ linters: - misspell - revive - staticcheck - - structcheck - typecheck - unused - - varcheck issues: exclude-use-default: false diff --git a/vendor/github.com/go-logr/logr/discard.go b/vendor/github.com/go-logr/logr/discard.go index 9d92a38f1..99fe8be93 100644 --- a/vendor/github.com/go-logr/logr/discard.go +++ b/vendor/github.com/go-logr/logr/discard.go @@ -20,35 +20,5 @@ package logr // used whenever the caller is not interested in the logs. Logger instances // produced by this function always compare as equal. func Discard() Logger { - return Logger{ - level: 0, - sink: discardLogSink{}, - } -} - -// discardLogSink is a LogSink that discards all messages. 
-type discardLogSink struct{} - -// Verify that it actually implements the interface -var _ LogSink = discardLogSink{} - -func (l discardLogSink) Init(RuntimeInfo) { -} - -func (l discardLogSink) Enabled(int) bool { - return false -} - -func (l discardLogSink) Info(int, string, ...interface{}) { -} - -func (l discardLogSink) Error(error, string, ...interface{}) { -} - -func (l discardLogSink) WithValues(...interface{}) LogSink { - return l -} - -func (l discardLogSink) WithName(string) LogSink { - return l + return New(nil) } diff --git a/vendor/github.com/go-logr/logr/funcr/funcr.go b/vendor/github.com/go-logr/logr/funcr/funcr.go index 7accdb0c4..e52f0cd01 100644 --- a/vendor/github.com/go-logr/logr/funcr/funcr.go +++ b/vendor/github.com/go-logr/logr/funcr/funcr.go @@ -21,13 +21,13 @@ limitations under the License. // github.com/go-logr/logr.LogSink with output through an arbitrary // "write" function. See New and NewJSON for details. // -// Custom LogSinks +// # Custom LogSinks // // For users who need more control, a funcr.Formatter can be embedded inside // your own custom LogSink implementation. This is useful when the LogSink // needs to implement additional methods, for example. // -// Formatting +// # Formatting // // This will respect logr.Marshaler, fmt.Stringer, and error interfaces for // values which are being logged. When rendering a struct, funcr will use Go's @@ -37,6 +37,7 @@ package funcr import ( "bytes" "encoding" + "encoding/json" "fmt" "path/filepath" "reflect" @@ -217,7 +218,7 @@ func newFormatter(opts Options, outfmt outputFormat) Formatter { prefix: "", values: nil, depth: 0, - opts: opts, + opts: &opts, } return f } @@ -231,7 +232,7 @@ type Formatter struct { values []interface{} valuesStr string depth int - opts Options + opts *Options } // outputFormat indicates which outputFormat to use. @@ -447,6 +448,7 @@ func (f Formatter) prettyWithFlags(value interface{}, flags uint32, depth int) s if flags&flagRawStruct == 0 { buf.WriteByte('{') } + printComma := false // testing i>0 is not enough because of JSON omitted fields for i := 0; i < t.NumField(); i++ { fld := t.Field(i) if fld.PkgPath != "" { @@ -478,9 +480,10 @@ func (f Formatter) prettyWithFlags(value interface{}, flags uint32, depth int) s if omitempty && isEmpty(v.Field(i)) { continue } - if i > 0 { + if printComma { buf.WriteByte(',') } + printComma = true // if we got here, we are rendering a field if fld.Anonymous && fld.Type.Kind() == reflect.Struct && name == "" { buf.WriteString(f.prettyWithFlags(v.Field(i).Interface(), flags|flagRawStruct, depth+1)) continue @@ -500,6 +503,20 @@ func (f Formatter) prettyWithFlags(value interface{}, flags uint32, depth int) s } return buf.String() case reflect.Slice, reflect.Array: + // If this is outputing as JSON make sure this isn't really a json.RawMessage. + // If so just emit "as-is" and don't pretty it as that will just print + // it as [X,Y,Z,...] which isn't terribly useful vs the string form you really want. + if f.outputFormat == outputJSON { + if rm, ok := value.(json.RawMessage); ok { + // If it's empty make sure we emit an empty value as the array style would below. 
+ if len(rm) > 0 { + buf.Write(rm) + } else { + buf.WriteString("null") + } + return buf.String() + } + } buf.WriteByte('[') for i := 0; i < v.Len(); i++ { if i > 0 { diff --git a/vendor/github.com/go-logr/logr/logr.go b/vendor/github.com/go-logr/logr/logr.go index c3b56b3d2..e027aea3f 100644 --- a/vendor/github.com/go-logr/logr/logr.go +++ b/vendor/github.com/go-logr/logr/logr.go @@ -21,7 +21,7 @@ limitations under the License. // to back that API. Packages in the Go ecosystem can depend on this package, // while callers can implement logging with whatever backend is appropriate. // -// Usage +// # Usage // // Logging is done using a Logger instance. Logger is a concrete type with // methods, which defers the actual logging to a LogSink interface. The main @@ -30,16 +30,20 @@ limitations under the License. // "structured logging". // // With Go's standard log package, we might write: -// log.Printf("setting target value %s", targetValue) +// +// log.Printf("setting target value %s", targetValue) // // With logr's structured logging, we'd write: -// logger.Info("setting target", "value", targetValue) +// +// logger.Info("setting target", "value", targetValue) // // Errors are much the same. Instead of: -// log.Printf("failed to open the pod bay door for user %s: %v", user, err) +// +// log.Printf("failed to open the pod bay door for user %s: %v", user, err) // // We'd write: -// logger.Error(err, "failed to open the pod bay door", "user", user) +// +// logger.Error(err, "failed to open the pod bay door", "user", user) // // Info() and Error() are very similar, but they are separate methods so that // LogSink implementations can choose to do things like attach additional @@ -47,7 +51,7 @@ limitations under the License. // always logged, regardless of the current verbosity. If there is no error // instance available, passing nil is valid. // -// Verbosity +// # Verbosity // // Often we want to log information only when the application in "verbose // mode". To write log lines that are more verbose, Logger has a V() method. @@ -58,20 +62,22 @@ limitations under the License. // Error messages do not have a verbosity level and are always logged. // // Where we might have written: -// if flVerbose >= 2 { -// log.Printf("an unusual thing happened") -// } +// +// if flVerbose >= 2 { +// log.Printf("an unusual thing happened") +// } // // We can write: -// logger.V(2).Info("an unusual thing happened") // -// Logger Names +// logger.V(2).Info("an unusual thing happened") +// +// # Logger Names // // Logger instances can have name strings so that all messages logged through // that instance have additional context. For example, you might want to add // a subsystem name: // -// logger.WithName("compactor").Info("started", "time", time.Now()) +// logger.WithName("compactor").Info("started", "time", time.Now()) // // The WithName() method returns a new Logger, which can be passed to // constructors or other functions for further use. Repeated use of WithName() @@ -82,25 +88,27 @@ limitations under the License. // joining operation (e.g. whitespace, commas, periods, slashes, brackets, // quotes, etc). // -// Saved Values +// # Saved Values // // Logger instances can store any number of key/value pairs, which will be // logged alongside all messages logged through that instance. 
For example, // you might want to create a Logger instance per managed object: // // With the standard log package, we might write: -// log.Printf("decided to set field foo to value %q for object %s/%s", -// targetValue, object.Namespace, object.Name) +// +// log.Printf("decided to set field foo to value %q for object %s/%s", +// targetValue, object.Namespace, object.Name) // // With logr we'd write: -// // Elsewhere: set up the logger to log the object name. -// obj.logger = mainLogger.WithValues( -// "name", obj.name, "namespace", obj.namespace) // -// // later on... -// obj.logger.Info("setting foo", "value", targetValue) +// // Elsewhere: set up the logger to log the object name. +// obj.logger = mainLogger.WithValues( +// "name", obj.name, "namespace", obj.namespace) +// +// // later on... +// obj.logger.Info("setting foo", "value", targetValue) // -// Best Practices +// # Best Practices // // Logger has very few hard rules, with the goal that LogSink implementations // might have a lot of freedom to differentiate. There are, however, some @@ -124,15 +132,15 @@ limitations under the License. // around. For cases where passing a logger is optional, a pointer to Logger // should be used. // -// Key Naming Conventions +// # Key Naming Conventions // // Keys are not strictly required to conform to any specification or regex, but // it is recommended that they: -// * be human-readable and meaningful (not auto-generated or simple ordinals) -// * be constant (not dependent on input data) -// * contain only printable characters -// * not contain whitespace or punctuation -// * use lower case for simple keys and lowerCamelCase for more complex ones +// - be human-readable and meaningful (not auto-generated or simple ordinals) +// - be constant (not dependent on input data) +// - contain only printable characters +// - not contain whitespace or punctuation +// - use lower case for simple keys and lowerCamelCase for more complex ones // // These guidelines help ensure that log data is processed properly regardless // of the log implementation. For example, log implementations will try to @@ -141,51 +149,54 @@ limitations under the License. // While users are generally free to use key names of their choice, it's // generally best to avoid using the following keys, as they're frequently used // by implementations: -// * "caller": the calling information (file/line) of a particular log line -// * "error": the underlying error value in the `Error` method -// * "level": the log level -// * "logger": the name of the associated logger -// * "msg": the log message -// * "stacktrace": the stack trace associated with a particular log line or -// error (often from the `Error` message) -// * "ts": the timestamp for a log line +// - "caller": the calling information (file/line) of a particular log line +// - "error": the underlying error value in the `Error` method +// - "level": the log level +// - "logger": the name of the associated logger +// - "msg": the log message +// - "stacktrace": the stack trace associated with a particular log line or +// error (often from the `Error` message) +// - "ts": the timestamp for a log line // // Implementations are encouraged to make use of these keys to represent the // above concepts, when necessary (for example, in a pure-JSON output form, it // would be necessary to represent at least message and timestamp as ordinary // named values). 
// -// Break Glass +// # Break Glass // // Implementations may choose to give callers access to the underlying // logging implementation. The recommended pattern for this is: -// // Underlier exposes access to the underlying logging implementation. -// // Since callers only have a logr.Logger, they have to know which -// // implementation is in use, so this interface is less of an abstraction -// // and more of way to test type conversion. -// type Underlier interface { -// GetUnderlying() -// } +// +// // Underlier exposes access to the underlying logging implementation. +// // Since callers only have a logr.Logger, they have to know which +// // implementation is in use, so this interface is less of an abstraction +// // and more of way to test type conversion. +// type Underlier interface { +// GetUnderlying() +// } // // Logger grants access to the sink to enable type assertions like this: -// func DoSomethingWithImpl(log logr.Logger) { -// if underlier, ok := log.GetSink()(impl.Underlier) { -// implLogger := underlier.GetUnderlying() -// ... -// } -// } +// +// func DoSomethingWithImpl(log logr.Logger) { +// if underlier, ok := log.GetSink().(impl.Underlier); ok { +// implLogger := underlier.GetUnderlying() +// ... +// } +// } // // Custom `With*` functions can be implemented by copying the complete // Logger struct and replacing the sink in the copy: -// // WithFooBar changes the foobar parameter in the log sink and returns a -// // new logger with that modified sink. It does nothing for loggers where -// // the sink doesn't support that parameter. -// func WithFoobar(log logr.Logger, foobar int) logr.Logger { -// if foobarLogSink, ok := log.GetSink()(FoobarSink); ok { -// log = log.WithSink(foobarLogSink.WithFooBar(foobar)) -// } -// return log -// } +// +// // WithFooBar changes the foobar parameter in the log sink and returns a +// // new logger with that modified sink. It does nothing for loggers where +// // the sink doesn't support that parameter. +// func WithFoobar(log logr.Logger, foobar int) logr.Logger { +// if foobarLogSink, ok := log.GetSink().(FoobarSink); ok { +// log = log.WithSink(foobarLogSink.WithFooBar(foobar)) +// } +// return log +// } // // Don't use New to construct a new Logger with a LogSink retrieved from an // existing Logger. Source code attribution might not work correctly and @@ -201,11 +212,14 @@ import ( ) // New returns a new Logger instance. This is primarily used by libraries -// implementing LogSink, rather than end users. +// implementing LogSink, rather than end users. Passing a nil sink will create +// a Logger which discards all log lines. func New(sink LogSink) Logger { logger := Logger{} logger.setSink(sink) - sink.Init(runtimeInfo) + if sink != nil { + sink.Init(runtimeInfo) + } return logger } @@ -244,7 +258,7 @@ type Logger struct { // Enabled tests whether this Logger is enabled. For example, commandline // flags might be used to set the logging verbosity and disable some info logs. func (l Logger) Enabled() bool { - return l.sink.Enabled(l.level) + return l.sink != nil && l.sink.Enabled(l.level) } // Info logs a non-error message with the given key/value pairs as context. @@ -254,6 +268,9 @@ func (l Logger) Enabled() bool { // information. The key/value pairs must alternate string keys and arbitrary // values. 
func (l Logger) Info(msg string, keysAndValues ...interface{}) { + if l.sink == nil { + return + } if l.Enabled() { if withHelper, ok := l.sink.(CallStackHelperLogSink); ok { withHelper.GetCallStackHelper()() @@ -273,6 +290,9 @@ func (l Logger) Info(msg string, keysAndValues ...interface{}) { // triggered this log line, if present. The err parameter is optional // and nil may be passed instead of an error instance. func (l Logger) Error(err error, msg string, keysAndValues ...interface{}) { + if l.sink == nil { + return + } if withHelper, ok := l.sink.(CallStackHelperLogSink); ok { withHelper.GetCallStackHelper()() } @@ -284,6 +304,9 @@ func (l Logger) Error(err error, msg string, keysAndValues ...interface{}) { // level means a log message is less important. Negative V-levels are treated // as 0. func (l Logger) V(level int) Logger { + if l.sink == nil { + return l + } if level < 0 { level = 0 } @@ -294,6 +317,9 @@ func (l Logger) V(level int) Logger { // WithValues returns a new Logger instance with additional key/value pairs. // See Info for documentation on how key/value pairs work. func (l Logger) WithValues(keysAndValues ...interface{}) Logger { + if l.sink == nil { + return l + } l.setSink(l.sink.WithValues(keysAndValues...)) return l } @@ -304,6 +330,9 @@ func (l Logger) WithValues(keysAndValues ...interface{}) Logger { // contain only letters, digits, and hyphens (see the package documentation for // more information). func (l Logger) WithName(name string) Logger { + if l.sink == nil { + return l + } l.setSink(l.sink.WithName(name)) return l } @@ -324,6 +353,9 @@ func (l Logger) WithName(name string) Logger { // WithCallDepth(1) because it works with implementions that support the // CallDepthLogSink and/or CallStackHelperLogSink interfaces. func (l Logger) WithCallDepth(depth int) Logger { + if l.sink == nil { + return l + } if withCallDepth, ok := l.sink.(CallDepthLogSink); ok { l.setSink(withCallDepth.WithCallDepth(depth)) } @@ -345,6 +377,9 @@ func (l Logger) WithCallDepth(depth int) Logger { // implementation does not support either of these, the original Logger will be // returned. func (l Logger) WithCallStackHelper() (func(), Logger) { + if l.sink == nil { + return func() {}, l + } var helper func() if withCallDepth, ok := l.sink.(CallDepthLogSink); ok { l.setSink(withCallDepth.WithCallDepth(1)) @@ -357,6 +392,11 @@ func (l Logger) WithCallStackHelper() (func(), Logger) { return helper, l } +// IsZero returns true if this logger is an uninitialized zero value +func (l Logger) IsZero() bool { + return l.sink == nil +} + // contextKey is how we find Loggers in a context.Context. type contextKey struct{} @@ -442,7 +482,7 @@ type LogSink interface { WithName(name string) LogSink } -// CallDepthLogSink represents a Logger that knows how to climb the call stack +// CallDepthLogSink represents a LogSink that knows how to climb the call stack // to identify the original call site and can offset the depth by a specified // number of frames. This is useful for users who have helper functions // between the "real" call site and the actual calls to Logger methods. @@ -467,7 +507,7 @@ type CallDepthLogSink interface { WithCallDepth(depth int) LogSink } -// CallStackHelperLogSink represents a Logger that knows how to climb +// CallStackHelperLogSink represents a LogSink that knows how to climb // the call stack to identify the original call site and can skip // intermediate helper functions if they mark themselves as // helper. Go's testing package uses that approach. 
diff --git a/vendor/github.com/go-openapi/swag/util.go b/vendor/github.com/go-openapi/swag/util.go index f78ab684a..d971fbe34 100644 --- a/vendor/github.com/go-openapi/swag/util.go +++ b/vendor/github.com/go-openapi/swag/util.go @@ -341,12 +341,21 @@ type zeroable interface { // IsZero returns true when the value passed into the function is a zero value. // This allows for safer checking of interface values. func IsZero(data interface{}) bool { + v := reflect.ValueOf(data) + // check for nil data + switch v.Kind() { + case reflect.Interface, reflect.Map, reflect.Ptr, reflect.Slice: + if v.IsNil() { + return true + } + } + // check for things that have an IsZero method instead if vv, ok := data.(zeroable); ok { return vv.IsZero() } + // continue with slightly more complex reflection - v := reflect.ValueOf(data) switch v.Kind() { case reflect.String: return v.Len() == 0 @@ -358,14 +367,13 @@ func IsZero(data interface{}) bool { return v.Uint() == 0 case reflect.Float32, reflect.Float64: return v.Float() == 0 - case reflect.Interface, reflect.Map, reflect.Ptr, reflect.Slice: - return v.IsNil() case reflect.Struct, reflect.Array: return reflect.DeepEqual(data, reflect.Zero(v.Type()).Interface()) case reflect.Invalid: return true + default: + return false } - return false } // AddInitialisms add additional initialisms diff --git a/vendor/github.com/google/go-containerregistry/pkg/name/registry.go b/vendor/github.com/google/go-containerregistry/pkg/name/registry.go index 2a26b66d0..5b0d01769 100644 --- a/vendor/github.com/google/go-containerregistry/pkg/name/registry.go +++ b/vendor/github.com/google/go-containerregistry/pkg/name/registry.go @@ -17,6 +17,7 @@ package name import ( "net" "net/url" + "path" "regexp" "strings" ) @@ -50,6 +51,11 @@ func (r Registry) String() string { return r.Name() } +// Repo returns a Repository in the Registry with the given name. +func (r Registry) Repo(repo ...string) Repository { + return Repository{Registry: r, repository: path.Join(repo...)} +} + // Scope returns the scope required to access the registry. func (r Registry) Scope(string) string { // The only resource under 'registry' is 'catalog'. http://goo.gl/N9cN9Z diff --git a/vendor/github.com/google/pprof/profile/encode.go b/vendor/github.com/google/pprof/profile/encode.go index ab7f03ae2..c8a1beb8a 100644 --- a/vendor/github.com/google/pprof/profile/encode.go +++ b/vendor/github.com/google/pprof/profile/encode.go @@ -17,6 +17,7 @@ package profile import ( "errors" "sort" + "strings" ) func (p *Profile) decoder() []decoder { @@ -183,12 +184,13 @@ var profileDecoder = []decoder{ // repeated Location location = 4 func(b *buffer, m message) error { x := new(Location) - x.Line = make([]Line, 0, 8) // Pre-allocate Line buffer + x.Line = b.tmpLines[:0] // Use shared space temporarily pp := m.(*Profile) pp.Location = append(pp.Location, x) err := decodeMessage(b, x) - var tmp []Line - x.Line = append(tmp, x.Line...) // Shrink to allocated size + b.tmpLines = x.Line[:0] + // Copy to shrink size and detach from shared space. + x.Line = append([]Line(nil), x.Line...) return err }, // repeated Function function = 5 @@ -252,6 +254,14 @@ func (p *Profile) postDecode() error { } else { mappings[m.ID] = m } + + // If this a main linux kernel mapping with a relocation symbol suffix + // ("[kernel.kallsyms]_text"), extract said suffix. + // It is fairly hacky to handle at this level, but the alternatives appear even worse. 
+ if strings.HasPrefix(m.File, "[kernel.kallsyms]") { + m.KernelRelocationSymbol = strings.ReplaceAll(m.File, "[kernel.kallsyms]", "") + } + } functions := make(map[uint64]*Function, len(p.Function)) @@ -298,41 +308,52 @@ func (p *Profile) postDecode() error { st.Unit, err = getString(p.stringTable, &st.unitX, err) } + // Pre-allocate space for all locations. + numLocations := 0 + for _, s := range p.Sample { + numLocations += len(s.locationIDX) + } + locBuffer := make([]*Location, numLocations) + for _, s := range p.Sample { - labels := make(map[string][]string, len(s.labelX)) - numLabels := make(map[string][]int64, len(s.labelX)) - numUnits := make(map[string][]string, len(s.labelX)) - for _, l := range s.labelX { - var key, value string - key, err = getString(p.stringTable, &l.keyX, err) - if l.strX != 0 { - value, err = getString(p.stringTable, &l.strX, err) - labels[key] = append(labels[key], value) - } else if l.numX != 0 || l.unitX != 0 { - numValues := numLabels[key] - units := numUnits[key] - if l.unitX != 0 { - var unit string - unit, err = getString(p.stringTable, &l.unitX, err) - units = padStringArray(units, len(numValues)) - numUnits[key] = append(units, unit) + if len(s.labelX) > 0 { + labels := make(map[string][]string, len(s.labelX)) + numLabels := make(map[string][]int64, len(s.labelX)) + numUnits := make(map[string][]string, len(s.labelX)) + for _, l := range s.labelX { + var key, value string + key, err = getString(p.stringTable, &l.keyX, err) + if l.strX != 0 { + value, err = getString(p.stringTable, &l.strX, err) + labels[key] = append(labels[key], value) + } else if l.numX != 0 || l.unitX != 0 { + numValues := numLabels[key] + units := numUnits[key] + if l.unitX != 0 { + var unit string + unit, err = getString(p.stringTable, &l.unitX, err) + units = padStringArray(units, len(numValues)) + numUnits[key] = append(units, unit) + } + numLabels[key] = append(numLabels[key], l.numX) } - numLabels[key] = append(numLabels[key], l.numX) } - } - if len(labels) > 0 { - s.Label = labels - } - if len(numLabels) > 0 { - s.NumLabel = numLabels - for key, units := range numUnits { - if len(units) > 0 { - numUnits[key] = padStringArray(units, len(numLabels[key])) + if len(labels) > 0 { + s.Label = labels + } + if len(numLabels) > 0 { + s.NumLabel = numLabels + for key, units := range numUnits { + if len(units) > 0 { + numUnits[key] = padStringArray(units, len(numLabels[key])) + } } + s.NumUnit = numUnits } - s.NumUnit = numUnits } - s.Location = make([]*Location, len(s.locationIDX)) + + s.Location = locBuffer[:len(s.locationIDX)] + locBuffer = locBuffer[len(s.locationIDX):] for i, lid := range s.locationIDX { if lid < uint64(len(locationIds)) { s.Location[i] = locationIds[lid] diff --git a/vendor/github.com/google/pprof/profile/filter.go b/vendor/github.com/google/pprof/profile/filter.go index ea8e66c68..c794b9390 100644 --- a/vendor/github.com/google/pprof/profile/filter.go +++ b/vendor/github.com/google/pprof/profile/filter.go @@ -22,6 +22,10 @@ import "regexp" // samples where at least one frame matches focus but none match ignore. // Returns true is the corresponding regexp matched at least one sample. 
func (p *Profile) FilterSamplesByName(focus, ignore, hide, show *regexp.Regexp) (fm, im, hm, hnm bool) { + if focus == nil && ignore == nil && hide == nil && show == nil { + fm = true // Missing focus implies a match + return + } focusOrIgnore := make(map[uint64]bool) hidden := make(map[uint64]bool) for _, l := range p.Location { diff --git a/vendor/github.com/google/pprof/profile/legacy_profile.go b/vendor/github.com/google/pprof/profile/legacy_profile.go index 0c8f3bb5b..8d07fd6c2 100644 --- a/vendor/github.com/google/pprof/profile/legacy_profile.go +++ b/vendor/github.com/google/pprof/profile/legacy_profile.go @@ -295,11 +295,12 @@ func get64b(b []byte) (uint64, []byte) { // // The general format for profilez samples is a sequence of words in // binary format. The first words are a header with the following data: -// 1st word -- 0 -// 2nd word -- 3 -// 3rd word -- 0 if a c++ application, 1 if a java application. -// 4th word -- Sampling period (in microseconds). -// 5th word -- Padding. +// +// 1st word -- 0 +// 2nd word -- 3 +// 3rd word -- 0 if a c++ application, 1 if a java application. +// 4th word -- Sampling period (in microseconds). +// 5th word -- Padding. func parseCPU(b []byte) (*Profile, error) { var parse func([]byte) (uint64, []byte) var n1, n2, n3, n4, n5 uint64 @@ -403,15 +404,18 @@ func cleanupDuplicateLocations(p *Profile) { // // profilez samples are a repeated sequence of stack frames of the // form: -// 1st word -- The number of times this stack was encountered. -// 2nd word -- The size of the stack (StackSize). -// 3rd word -- The first address on the stack. -// ... -// StackSize + 2 -- The last address on the stack +// +// 1st word -- The number of times this stack was encountered. +// 2nd word -- The size of the stack (StackSize). +// 3rd word -- The first address on the stack. +// ... +// StackSize + 2 -- The last address on the stack +// // The last stack trace is of the form: -// 1st word -- 0 -// 2nd word -- 1 -// 3rd word -- 0 +// +// 1st word -- 0 +// 2nd word -- 1 +// 3rd word -- 0 // // Addresses from stack traces may point to the next instruction after // each call. Optionally adjust by -1 to land somewhere on the actual @@ -861,7 +865,6 @@ func parseThread(b []byte) (*Profile, error) { // Recognize each thread and populate profile samples. for !isMemoryMapSentinel(line) { if strings.HasPrefix(line, "---- no stack trace for") { - line = "" break } if t := threadStartRE.FindStringSubmatch(line); len(t) != 4 { diff --git a/vendor/github.com/google/pprof/profile/merge.go b/vendor/github.com/google/pprof/profile/merge.go index 9978e7330..4b66282cb 100644 --- a/vendor/github.com/google/pprof/profile/merge.go +++ b/vendor/github.com/google/pprof/profile/merge.go @@ -15,6 +15,7 @@ package profile import ( + "encoding/binary" "fmt" "sort" "strconv" @@ -58,7 +59,7 @@ func Merge(srcs []*Profile) (*Profile, error) { for _, src := range srcs { // Clear the profile-specific hash tables - pm.locationsByID = make(map[uint64]*Location, len(src.Location)) + pm.locationsByID = makeLocationIDMap(len(src.Location)) pm.functionsByID = make(map[uint64]*Function, len(src.Function)) pm.mappingsByID = make(map[uint64]mapInfo, len(src.Mapping)) @@ -136,7 +137,7 @@ type profileMerger struct { p *Profile // Memoization tables within a profile. 
- locationsByID map[uint64]*Location + locationsByID locationIDMap functionsByID map[uint64]*Function mappingsByID map[uint64]mapInfo @@ -153,6 +154,16 @@ type mapInfo struct { } func (pm *profileMerger) mapSample(src *Sample) *Sample { + // Check memoization table + k := pm.sampleKey(src) + if ss, ok := pm.samples[k]; ok { + for i, v := range src.Value { + ss.Value[i] += v + } + return ss + } + + // Make new sample. s := &Sample{ Location: make([]*Location, len(src.Location)), Value: make([]int64, len(src.Value)), @@ -177,52 +188,98 @@ func (pm *profileMerger) mapSample(src *Sample) *Sample { s.NumLabel[k] = vv s.NumUnit[k] = uu } - // Check memoization table. Must be done on the remapped location to - // account for the remapped mapping. Add current values to the - // existing sample. - k := s.key() - if ss, ok := pm.samples[k]; ok { - for i, v := range src.Value { - ss.Value[i] += v - } - return ss - } copy(s.Value, src.Value) pm.samples[k] = s pm.p.Sample = append(pm.p.Sample, s) return s } -// key generates sampleKey to be used as a key for maps. -func (sample *Sample) key() sampleKey { - ids := make([]string, len(sample.Location)) - for i, l := range sample.Location { - ids[i] = strconv.FormatUint(l.ID, 16) +func (pm *profileMerger) sampleKey(sample *Sample) sampleKey { + // Accumulate contents into a string. + var buf strings.Builder + buf.Grow(64) // Heuristic to avoid extra allocs + + // encode a number + putNumber := func(v uint64) { + var num [binary.MaxVarintLen64]byte + n := binary.PutUvarint(num[:], v) + buf.Write(num[:n]) + } + + // encode a string prefixed with its length. + putDelimitedString := func(s string) { + putNumber(uint64(len(s))) + buf.WriteString(s) + } + + for _, l := range sample.Location { + // Get the location in the merged profile, which may have a different ID. + if loc := pm.mapLocation(l); loc != nil { + putNumber(loc.ID) + } } + putNumber(0) // Delimiter - labels := make([]string, 0, len(sample.Label)) - for k, v := range sample.Label { - labels = append(labels, fmt.Sprintf("%q%q", k, v)) + for _, l := range sortedKeys1(sample.Label) { + putDelimitedString(l) + values := sample.Label[l] + putNumber(uint64(len(values))) + for _, v := range values { + putDelimitedString(v) + } } - sort.Strings(labels) - numlabels := make([]string, 0, len(sample.NumLabel)) - for k, v := range sample.NumLabel { - numlabels = append(numlabels, fmt.Sprintf("%q%x%x", k, v, sample.NumUnit[k])) + for _, l := range sortedKeys2(sample.NumLabel) { + putDelimitedString(l) + values := sample.NumLabel[l] + putNumber(uint64(len(values))) + for _, v := range values { + putNumber(uint64(v)) + } + units := sample.NumUnit[l] + putNumber(uint64(len(units))) + for _, v := range units { + putDelimitedString(v) + } } - sort.Strings(numlabels) - return sampleKey{ - strings.Join(ids, "|"), - strings.Join(labels, ""), - strings.Join(numlabels, ""), + return sampleKey(buf.String()) +} + +type sampleKey string + +// sortedKeys1 returns the sorted keys found in a string->[]string map. +// +// Note: this is currently non-generic since github pprof runs golint, +// which does not support generics. When that issue is fixed, it can +// be merged with sortedKeys2 and made into a generic function. 
+func sortedKeys1(m map[string][]string) []string { + if len(m) == 0 { + return nil } + keys := make([]string, 0, len(m)) + for k := range m { + keys = append(keys, k) + } + sort.Strings(keys) + return keys } -type sampleKey struct { - locations string - labels string - numlabels string +// sortedKeys2 returns the sorted keys found in a string->[]int64 map. +// +// Note: this is currently non-generic since github pprof runs golint, +// which does not support generics. When that issue is fixed, it can +// be merged with sortedKeys1 and made into a generic function. +func sortedKeys2(m map[string][]int64) []string { + if len(m) == 0 { + return nil + } + keys := make([]string, 0, len(m)) + for k := range m { + keys = append(keys, k) + } + sort.Strings(keys) + return keys } func (pm *profileMerger) mapLocation(src *Location) *Location { @@ -230,7 +287,7 @@ func (pm *profileMerger) mapLocation(src *Location) *Location { return nil } - if l, ok := pm.locationsByID[src.ID]; ok { + if l := pm.locationsByID.get(src.ID); l != nil { return l } @@ -249,10 +306,10 @@ func (pm *profileMerger) mapLocation(src *Location) *Location { // account for the remapped mapping ID. k := l.key() if ll, ok := pm.locations[k]; ok { - pm.locationsByID[src.ID] = ll + pm.locationsByID.set(src.ID, ll) return ll } - pm.locationsByID[src.ID] = l + pm.locationsByID.set(src.ID, l) pm.locations[k] = l pm.p.Location = append(pm.p.Location, l) return l @@ -303,16 +360,17 @@ func (pm *profileMerger) mapMapping(src *Mapping) mapInfo { return mi } m := &Mapping{ - ID: uint64(len(pm.p.Mapping) + 1), - Start: src.Start, - Limit: src.Limit, - Offset: src.Offset, - File: src.File, - BuildID: src.BuildID, - HasFunctions: src.HasFunctions, - HasFilenames: src.HasFilenames, - HasLineNumbers: src.HasLineNumbers, - HasInlineFrames: src.HasInlineFrames, + ID: uint64(len(pm.p.Mapping) + 1), + Start: src.Start, + Limit: src.Limit, + Offset: src.Offset, + File: src.File, + KernelRelocationSymbol: src.KernelRelocationSymbol, + BuildID: src.BuildID, + HasFunctions: src.HasFunctions, + HasFilenames: src.HasFilenames, + HasLineNumbers: src.HasLineNumbers, + HasInlineFrames: src.HasInlineFrames, } pm.p.Mapping = append(pm.p.Mapping, m) @@ -479,3 +537,131 @@ func (p *Profile) compatible(pb *Profile) error { func equalValueType(st1, st2 *ValueType) bool { return st1.Type == st2.Type && st1.Unit == st2.Unit } + +// locationIDMap is like a map[uint64]*Location, but provides efficiency for +// ids that are densely numbered, which is often the case. +type locationIDMap struct { + dense []*Location // indexed by id for id < len(dense) + sparse map[uint64]*Location // indexed by id for id >= len(dense) +} + +func makeLocationIDMap(n int) locationIDMap { + return locationIDMap{ + dense: make([]*Location, n), + sparse: map[uint64]*Location{}, + } +} + +func (lm locationIDMap) get(id uint64) *Location { + if id < uint64(len(lm.dense)) { + return lm.dense[int(id)] + } + return lm.sparse[id] +} + +func (lm locationIDMap) set(id uint64, loc *Location) { + if id < uint64(len(lm.dense)) { + lm.dense[id] = loc + return + } + lm.sparse[id] = loc +} + +// CompatibilizeSampleTypes makes profiles compatible to be compared/merged. It +// keeps sample types that appear in all profiles only and drops/reorders the +// sample types as necessary. +// +// In the case of sample types order is not the same for given profiles the +// order is derived from the first profile. +// +// Profiles are modified in-place. 
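The merge path above swaps the per-source map[uint64]*Location for locationIDMap, which stores densely numbered IDs in a slice and spills everything else into a map. Below is a minimal sketch of the same dense-plus-sparse lookup pattern, using strings instead of *Location so it runs on its own; the type and names are illustrative, not the vendored code.

    package main

    import "fmt"

    // denseSparseMap mirrors the locationIDMap idea: IDs below len(dense)
    // live in a slice (no hashing), anything larger spills into a map.
    type denseSparseMap struct {
        dense  []string
        sparse map[uint64]string
    }

    func newDenseSparseMap(n int) denseSparseMap {
        return denseSparseMap{
            dense:  make([]string, n),
            sparse: map[uint64]string{},
        }
    }

    func (m denseSparseMap) get(id uint64) string {
        if id < uint64(len(m.dense)) {
            return m.dense[id]
        }
        return m.sparse[id]
    }

    func (m denseSparseMap) set(id uint64, v string) {
        if id < uint64(len(m.dense)) {
            m.dense[id] = v
            return
        }
        m.sparse[id] = v
    }

    func main() {
        m := newDenseSparseMap(4)
        m.set(1, "main.run")       // dense path
        m.set(1<<40, "runtime.gc") // sparse path
        fmt.Println(m.get(1), m.get(1<<40))
    }

Lookups on the dense path avoid hashing entirely, which pays off because location IDs in most profiles are small and sequential.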
+// +// It returns an error if the sample type's intersection is empty. +func CompatibilizeSampleTypes(ps []*Profile) error { + sTypes := commonSampleTypes(ps) + if len(sTypes) == 0 { + return fmt.Errorf("profiles have empty common sample type list") + } + for _, p := range ps { + if err := compatibilizeSampleTypes(p, sTypes); err != nil { + return err + } + } + return nil +} + +// commonSampleTypes returns sample types that appear in all profiles in the +// order how they ordered in the first profile. +func commonSampleTypes(ps []*Profile) []string { + if len(ps) == 0 { + return nil + } + sTypes := map[string]int{} + for _, p := range ps { + for _, st := range p.SampleType { + sTypes[st.Type]++ + } + } + var res []string + for _, st := range ps[0].SampleType { + if sTypes[st.Type] == len(ps) { + res = append(res, st.Type) + } + } + return res +} + +// compatibilizeSampleTypes drops sample types that are not present in sTypes +// list and reorder them if needed. +// +// It sets DefaultSampleType to sType[0] if it is not in sType list. +// +// It assumes that all sample types from the sTypes list are present in the +// given profile otherwise it returns an error. +func compatibilizeSampleTypes(p *Profile, sTypes []string) error { + if len(sTypes) == 0 { + return fmt.Errorf("sample type list is empty") + } + defaultSampleType := sTypes[0] + reMap, needToModify := make([]int, len(sTypes)), false + for i, st := range sTypes { + if st == p.DefaultSampleType { + defaultSampleType = p.DefaultSampleType + } + idx := searchValueType(p.SampleType, st) + if idx < 0 { + return fmt.Errorf("%q sample type is not found in profile", st) + } + reMap[i] = idx + if idx != i { + needToModify = true + } + } + if !needToModify && len(sTypes) == len(p.SampleType) { + return nil + } + p.DefaultSampleType = defaultSampleType + oldSampleTypes := p.SampleType + p.SampleType = make([]*ValueType, len(sTypes)) + for i, idx := range reMap { + p.SampleType[i] = oldSampleTypes[idx] + } + values := make([]int64, len(sTypes)) + for _, s := range p.Sample { + for i, idx := range reMap { + values[i] = s.Value[idx] + } + s.Value = s.Value[:len(values)] + copy(s.Value, values) + } + return nil +} + +func searchValueType(vts []*ValueType, s string) int { + for i, vt := range vts { + if vt.Type == s { + return i + } + } + return -1 +} diff --git a/vendor/github.com/google/pprof/profile/profile.go b/vendor/github.com/google/pprof/profile/profile.go index 2590c8ddb..60ef7e926 100644 --- a/vendor/github.com/google/pprof/profile/profile.go +++ b/vendor/github.com/google/pprof/profile/profile.go @@ -21,7 +21,6 @@ import ( "compress/gzip" "fmt" "io" - "io/ioutil" "math" "path/filepath" "regexp" @@ -73,9 +72,23 @@ type ValueType struct { type Sample struct { Location []*Location Value []int64 - Label map[string][]string + // Label is a per-label-key map to values for string labels. + // + // In general, having multiple values for the given label key is strongly + // discouraged - see docs for the sample label field in profile.proto. The + // main reason this unlikely state is tracked here is to make the + // decoding->encoding roundtrip not lossy. But we expect that the value + // slices present in this map are always of length 1. + Label map[string][]string + // NumLabel is a per-label-key map to values for numeric labels. See a note + // above on handling multiple values for a label. 
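CompatibilizeSampleTypes, added above, keeps only the sample types shared by every profile (ordered as in the first one) and drops or reorders each sample's values to match, which makes differently shaped profiles safe to compare or merge. A hedged usage sketch, assuming the package is imported under its usual path github.com/google/pprof/profile; the hand-built profiles are purely illustrative.

    package main

    import (
        "fmt"

        "github.com/google/pprof/profile"
    )

    func main() {
        a := &profile.Profile{
            SampleType: []*profile.ValueType{
                {Type: "alloc_objects", Unit: "count"},
                {Type: "alloc_space", Unit: "bytes"},
            },
            Sample: []*profile.Sample{{Value: []int64{10, 2048}}},
        }
        b := &profile.Profile{
            SampleType: []*profile.ValueType{{Type: "alloc_space", Unit: "bytes"}},
            Sample:     []*profile.Sample{{Value: []int64{4096}}},
        }

        // Only "alloc_space" appears in both profiles, so after the call both
        // profiles carry just that sample type and a's values are re-sliced.
        if err := profile.CompatibilizeSampleTypes([]*profile.Profile{a, b}); err != nil {
            fmt.Println("compatibilize:", err)
            return
        }
        fmt.Println(a.SampleType[0].Type, a.Sample[0].Value) // alloc_space [2048]
        fmt.Println(b.SampleType[0].Type, b.Sample[0].Value) // alloc_space [4096]
    }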
NumLabel map[string][]int64 - NumUnit map[string][]string + // NumUnit is a per-label-key map to the unit names of corresponding numeric + // label values. The unit info may be missing even if the label is in + // NumLabel, see the docs in profile.proto for details. When the value is + // slice is present and not nil, its length must be equal to the length of + // the corresponding value slice in NumLabel. + NumUnit map[string][]string locationIDX []uint64 labelX []label @@ -106,6 +119,15 @@ type Mapping struct { fileX int64 buildIDX int64 + + // Name of the kernel relocation symbol ("_text" or "_stext"), extracted from File. + // For linux kernel mappings generated by some tools, correct symbolization depends + // on knowing which of the two possible relocation symbols was used for `Start`. + // This is given to us as a suffix in `File` (e.g. "[kernel.kallsyms]_stext"). + // + // Note, this public field is not persisted in the proto. For the purposes of + // copying / merging / hashing profiles, it is considered subsumed by `File`. + KernelRelocationSymbol string } // Location corresponds to Profile.Location @@ -144,7 +166,7 @@ type Function struct { // may be a gzip-compressed encoded protobuf or one of many legacy // profile formats which may be unsupported in the future. func Parse(r io.Reader) (*Profile, error) { - data, err := ioutil.ReadAll(r) + data, err := io.ReadAll(r) if err != nil { return nil, err } @@ -159,7 +181,7 @@ func ParseData(data []byte) (*Profile, error) { if len(data) >= 2 && data[0] == 0x1f && data[1] == 0x8b { gz, err := gzip.NewReader(bytes.NewBuffer(data)) if err == nil { - data, err = ioutil.ReadAll(gz) + data, err = io.ReadAll(gz) } if err != nil { return nil, fmt.Errorf("decompressing profile: %v", err) @@ -707,6 +729,35 @@ func (s *Sample) HasLabel(key, value string) bool { return false } +// SetNumLabel sets the specified key to the specified value for all samples in the +// profile. "unit" is a slice that describes the units that each corresponding member +// of "values" is measured in (e.g. bytes or seconds). If there is no relevant +// unit for a given value, that member of "unit" should be the empty string. +// "unit" must either have the same length as "value", or be nil. +func (p *Profile) SetNumLabel(key string, value []int64, unit []string) { + for _, sample := range p.Sample { + if sample.NumLabel == nil { + sample.NumLabel = map[string][]int64{key: value} + } else { + sample.NumLabel[key] = value + } + if sample.NumUnit == nil { + sample.NumUnit = map[string][]string{key: unit} + } else { + sample.NumUnit[key] = unit + } + } +} + +// RemoveNumLabel removes all numerical labels associated with the specified key for all +// samples in the profile. +func (p *Profile) RemoveNumLabel(key string) { + for _, sample := range p.Sample { + delete(sample.NumLabel, key) + delete(sample.NumUnit, key) + } +} + // DiffBaseSample returns true if a sample belongs to the diff base and false // otherwise. 
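SetNumLabel and RemoveNumLabel, added above, apply to every sample in the profile at once; the unit slice must be nil or parallel to the value slice. A short usage sketch with an illustrative in-memory profile, again assuming the github.com/google/pprof/profile import path.

    package main

    import (
        "fmt"

        "github.com/google/pprof/profile"
    )

    func main() {
        p := &profile.Profile{
            SampleType: []*profile.ValueType{{Type: "samples", Unit: "count"}},
            Sample: []*profile.Sample{
                {Value: []int64{1}},
                {Value: []int64{3}},
            },
        }

        // Tag every sample with a numeric label and its unit; a nil unit
        // slice is allowed when there is no meaningful unit.
        p.SetNumLabel("bytes", []int64{4096}, []string{"bytes"})
        fmt.Println(p.Sample[0].NumLabel["bytes"], p.Sample[0].NumUnit["bytes"])

        // Dropping the label removes both the values and the unit info.
        p.RemoveNumLabel("bytes")
        fmt.Println(len(p.Sample[0].NumLabel), len(p.Sample[0].NumUnit))
    }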
func (s *Sample) DiffBaseSample() bool { diff --git a/vendor/github.com/google/pprof/profile/proto.go b/vendor/github.com/google/pprof/profile/proto.go index 539ad3ab3..a15696ba1 100644 --- a/vendor/github.com/google/pprof/profile/proto.go +++ b/vendor/github.com/google/pprof/profile/proto.go @@ -39,11 +39,12 @@ import ( ) type buffer struct { - field int // field tag - typ int // proto wire type code for field - u64 uint64 - data []byte - tmp [16]byte + field int // field tag + typ int // proto wire type code for field + u64 uint64 + data []byte + tmp [16]byte + tmpLines []Line // temporary storage used while decoding "repeated Line". } type decoder func(*buffer, message) error @@ -286,7 +287,6 @@ func decodeInt64s(b *buffer, x *[]int64) error { if b.typ == 2 { // Packed encoding data := b.data - tmp := make([]int64, 0, len(data)) // Maximally sized for len(data) > 0 { var u uint64 var err error @@ -294,9 +294,8 @@ func decodeInt64s(b *buffer, x *[]int64) error { if u, data, err = decodeVarint(data); err != nil { return err } - tmp = append(tmp, int64(u)) + *x = append(*x, int64(u)) } - *x = append(*x, tmp...) return nil } var i int64 @@ -319,7 +318,6 @@ func decodeUint64s(b *buffer, x *[]uint64) error { if b.typ == 2 { data := b.data // Packed encoding - tmp := make([]uint64, 0, len(data)) // Maximally sized for len(data) > 0 { var u uint64 var err error @@ -327,9 +325,8 @@ func decodeUint64s(b *buffer, x *[]uint64) error { if u, data, err = decodeVarint(data); err != nil { return err } - tmp = append(tmp, u) + *x = append(*x, u) } - *x = append(*x, tmp...) return nil } var u uint64 diff --git a/vendor/github.com/google/pprof/profile/prune.go b/vendor/github.com/google/pprof/profile/prune.go index 02d21a818..b2f9fd546 100644 --- a/vendor/github.com/google/pprof/profile/prune.go +++ b/vendor/github.com/google/pprof/profile/prune.go @@ -62,15 +62,31 @@ func (p *Profile) Prune(dropRx, keepRx *regexp.Regexp) { prune := make(map[uint64]bool) pruneBeneath := make(map[uint64]bool) + // simplifyFunc can be expensive, so cache results. + // Note that the same function name can be encountered many times due + // different lines and addresses in the same function. + pruneCache := map[string]bool{} // Map from function to whether or not to prune + pruneFromHere := func(s string) bool { + if r, ok := pruneCache[s]; ok { + return r + } + funcName := simplifyFunc(s) + if dropRx.MatchString(funcName) { + if keepRx == nil || !keepRx.MatchString(funcName) { + pruneCache[s] = true + return true + } + } + pruneCache[s] = false + return false + } + for _, loc := range p.Location { var i int for i = len(loc.Line) - 1; i >= 0; i-- { if fn := loc.Line[i].Function; fn != nil && fn.Name != "" { - funcName := simplifyFunc(fn.Name) - if dropRx.MatchString(funcName) { - if keepRx == nil || !keepRx.MatchString(funcName) { - break - } + if pruneFromHere(fn.Name) { + break } } } diff --git a/vendor/github.com/moby/term/doc.go b/vendor/github.com/moby/term/doc.go new file mode 100644 index 000000000..c9bc03244 --- /dev/null +++ b/vendor/github.com/moby/term/doc.go @@ -0,0 +1,3 @@ +// Package term provides structures and helper functions to work with +// terminal (state, sizes). 
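The Prune change above caches the drop/keep decision per raw function name, so simplifyFunc and the regexps run once per name instead of once per line and address. The same memoized-predicate pattern, reduced to a standalone sketch with illustrative regexps and names.

    package main

    import (
        "fmt"
        "regexp"
    )

    func main() {
        dropRx := regexp.MustCompile(`^runtime\.`)
        keepRx := regexp.MustCompile(`runtime\.mallocgc$`)

        cache := map[string]bool{} // raw name -> prune decision
        shouldPrune := func(name string) bool {
            if r, ok := cache[name]; ok {
                return r
            }
            r := dropRx.MatchString(name) && !keepRx.MatchString(name)
            cache[name] = r
            return r
        }

        names := []string{
            "runtime.gcBgMarkWorker",
            "runtime.mallocgc",
            "main.work",
            "runtime.gcBgMarkWorker", // answered from the cache
        }
        for _, fn := range names {
            fmt.Println(fn, "prune:", shouldPrune(fn))
        }
    }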
+package term diff --git a/vendor/github.com/moby/term/tc.go b/vendor/github.com/moby/term/tc.go deleted file mode 100644 index 8a5e09f58..000000000 --- a/vendor/github.com/moby/term/tc.go +++ /dev/null @@ -1,20 +0,0 @@ -//go:build !windows -// +build !windows - -package term - -import ( - "golang.org/x/sys/unix" -) - -func tcget(fd uintptr) (*Termios, error) { - p, err := unix.IoctlGetTermios(int(fd), getTermios) - if err != nil { - return nil, err - } - return p, nil -} - -func tcset(fd uintptr, p *Termios) error { - return unix.IoctlSetTermios(int(fd), setTermios, p) -} diff --git a/vendor/github.com/moby/term/term.go b/vendor/github.com/moby/term/term.go index 8c1fc1b5a..f9d8988ef 100644 --- a/vendor/github.com/moby/term/term.go +++ b/vendor/github.com/moby/term/term.go @@ -1,119 +1,85 @@ -//go:build !windows -// +build !windows - -// Package term provides structures and helper functions to work with -// terminal (state, sizes). package term -import ( - "errors" - "fmt" - "io" - "os" - "os/signal" - - "golang.org/x/sys/unix" -) +import "io" -// ErrInvalidState is returned if the state of the terminal is invalid. -var ErrInvalidState = errors.New("Invalid terminal state") - -// State represents the state of the terminal. -type State struct { - termios Termios -} +// State holds the platform-specific state / console mode for the terminal. +type State terminalState // Winsize represents the size of the terminal window. type Winsize struct { Height uint16 Width uint16 - x uint16 - y uint16 + + // Only used on Unix + x uint16 + y uint16 } // StdStreams returns the standard streams (stdin, stdout, stderr). +// +// On Windows, it attempts to turn on VT handling on all std handles if +// supported, or falls back to terminal emulation. On Unix, this returns +// the standard [os.Stdin], [os.Stdout] and [os.Stderr]. func StdStreams() (stdIn io.ReadCloser, stdOut, stdErr io.Writer) { - return os.Stdin, os.Stdout, os.Stderr + return stdStreams() } // GetFdInfo returns the file descriptor for an os.File and indicates whether the file represents a terminal. -func GetFdInfo(in interface{}) (uintptr, bool) { - var inFd uintptr - var isTerminalIn bool - if file, ok := in.(*os.File); ok { - inFd = file.Fd() - isTerminalIn = IsTerminal(inFd) - } - return inFd, isTerminalIn +func GetFdInfo(in interface{}) (fd uintptr, isTerminal bool) { + return getFdInfo(in) +} + +// GetWinsize returns the window size based on the specified file descriptor. +func GetWinsize(fd uintptr) (*Winsize, error) { + return getWinsize(fd) +} + +// SetWinsize tries to set the specified window size for the specified file +// descriptor. It is only implemented on Unix, and returns an error on Windows. +func SetWinsize(fd uintptr, ws *Winsize) error { + return setWinsize(fd, ws) } // IsTerminal returns true if the given file descriptor is a terminal. func IsTerminal(fd uintptr) bool { - _, err := tcget(fd) - return err == nil + return isTerminal(fd) } // RestoreTerminal restores the terminal connected to the given file descriptor // to a previous state. func RestoreTerminal(fd uintptr, state *State) error { - if state == nil { - return ErrInvalidState - } - return tcset(fd, &state.termios) + return restoreTerminal(fd, state) } // SaveState saves the state of the terminal connected to the given file descriptor. 
func SaveState(fd uintptr) (*State, error) { - termios, err := tcget(fd) - if err != nil { - return nil, err - } - return &State{termios: *termios}, nil + return saveState(fd) } // DisableEcho applies the specified state to the terminal connected to the file // descriptor, with echo disabled. func DisableEcho(fd uintptr, state *State) error { - newState := state.termios - newState.Lflag &^= unix.ECHO - - if err := tcset(fd, &newState); err != nil { - return err - } - handleInterrupt(fd, state) - return nil + return disableEcho(fd, state) } // SetRawTerminal puts the terminal connected to the given file descriptor into -// raw mode and returns the previous state. On UNIX, this puts both the input -// and output into raw mode. On Windows, it only puts the input into raw mode. -func SetRawTerminal(fd uintptr) (*State, error) { - oldState, err := MakeRaw(fd) - if err != nil { - return nil, err - } - handleInterrupt(fd, oldState) - return oldState, err +// raw mode and returns the previous state. On UNIX, this is the equivalent of +// [MakeRaw], and puts both the input and output into raw mode. On Windows, it +// only puts the input into raw mode. +func SetRawTerminal(fd uintptr) (previousState *State, err error) { + return setRawTerminal(fd) } // SetRawTerminalOutput puts the output of terminal connected to the given file // descriptor into raw mode. On UNIX, this does nothing and returns nil for the // state. On Windows, it disables LF -> CRLF translation. -func SetRawTerminalOutput(fd uintptr) (*State, error) { - return nil, nil +func SetRawTerminalOutput(fd uintptr) (previousState *State, err error) { + return setRawTerminalOutput(fd) } -func handleInterrupt(fd uintptr, state *State) { - sigchan := make(chan os.Signal, 1) - signal.Notify(sigchan, os.Interrupt) - go func() { - for range sigchan { - // quit cleanly and the new terminal item is on a new line - fmt.Println() - signal.Stop(sigchan) - close(sigchan) - RestoreTerminal(fd, state) - os.Exit(1) - } - }() +// MakeRaw puts the terminal (Windows Console) connected to the +// given file descriptor into raw mode and returns the previous state of +// the terminal so that it can be restored. +func MakeRaw(fd uintptr) (previousState *State, err error) { + return makeRaw(fd) } diff --git a/vendor/github.com/moby/term/term_unix.go b/vendor/github.com/moby/term/term_unix.go new file mode 100644 index 000000000..2ec7706a1 --- /dev/null +++ b/vendor/github.com/moby/term/term_unix.go @@ -0,0 +1,98 @@ +//go:build !windows +// +build !windows + +package term + +import ( + "errors" + "io" + "os" + + "golang.org/x/sys/unix" +) + +// ErrInvalidState is returned if the state of the terminal is invalid. +// +// Deprecated: ErrInvalidState is no longer used. +var ErrInvalidState = errors.New("Invalid terminal state") + +// terminalState holds the platform-specific state / console mode for the terminal. 
+type terminalState struct { + termios unix.Termios +} + +func stdStreams() (stdIn io.ReadCloser, stdOut, stdErr io.Writer) { + return os.Stdin, os.Stdout, os.Stderr +} + +func getFdInfo(in interface{}) (uintptr, bool) { + var inFd uintptr + var isTerminalIn bool + if file, ok := in.(*os.File); ok { + inFd = file.Fd() + isTerminalIn = isTerminal(inFd) + } + return inFd, isTerminalIn +} + +func getWinsize(fd uintptr) (*Winsize, error) { + uws, err := unix.IoctlGetWinsize(int(fd), unix.TIOCGWINSZ) + ws := &Winsize{Height: uws.Row, Width: uws.Col, x: uws.Xpixel, y: uws.Ypixel} + return ws, err +} + +func setWinsize(fd uintptr, ws *Winsize) error { + return unix.IoctlSetWinsize(int(fd), unix.TIOCSWINSZ, &unix.Winsize{ + Row: ws.Height, + Col: ws.Width, + Xpixel: ws.x, + Ypixel: ws.y, + }) +} + +func isTerminal(fd uintptr) bool { + _, err := tcget(fd) + return err == nil +} + +func restoreTerminal(fd uintptr, state *State) error { + if state == nil { + return errors.New("invalid terminal state") + } + return tcset(fd, &state.termios) +} + +func saveState(fd uintptr) (*State, error) { + termios, err := tcget(fd) + if err != nil { + return nil, err + } + return &State{termios: *termios}, nil +} + +func disableEcho(fd uintptr, state *State) error { + newState := state.termios + newState.Lflag &^= unix.ECHO + + return tcset(fd, &newState) +} + +func setRawTerminal(fd uintptr) (*State, error) { + return makeRaw(fd) +} + +func setRawTerminalOutput(fd uintptr) (*State, error) { + return nil, nil +} + +func tcget(fd uintptr) (*unix.Termios, error) { + p, err := unix.IoctlGetTermios(int(fd), getTermios) + if err != nil { + return nil, err + } + return p, nil +} + +func tcset(fd uintptr, p *unix.Termios) error { + return unix.IoctlSetTermios(int(fd), setTermios, p) +} diff --git a/vendor/github.com/moby/term/term_windows.go b/vendor/github.com/moby/term/term_windows.go index 3cdc8edbd..81ccff042 100644 --- a/vendor/github.com/moby/term/term_windows.go +++ b/vendor/github.com/moby/term/term_windows.go @@ -1,6 +1,7 @@ package term import ( + "fmt" "io" "os" "os/signal" @@ -9,22 +10,15 @@ import ( "golang.org/x/sys/windows" ) -// State holds the console mode for the terminal. -type State struct { +// terminalState holds the platform-specific state / console mode for the terminal. +type terminalState struct { mode uint32 } -// Winsize is used for window size. -type Winsize struct { - Height uint16 - Width uint16 -} - // vtInputSupported is true if winterm.ENABLE_VIRTUAL_TERMINAL_INPUT is supported by the console var vtInputSupported bool -// StdStreams returns the standard streams (stdin, stdout, stderr). -func StdStreams() (stdIn io.ReadCloser, stdOut, stdErr io.Writer) { +func stdStreams() (stdIn io.ReadCloser, stdOut, stdErr io.Writer) { // Turn on VT handling on all std handles, if possible. This might // fail, in which case we will fall back to terminal emulation. var ( @@ -87,16 +81,14 @@ func StdStreams() (stdIn io.ReadCloser, stdOut, stdErr io.Writer) { stdErr = os.Stderr } - return + return stdIn, stdOut, stdErr } -// GetFdInfo returns the file descriptor for an os.File and indicates whether the file represents a terminal. -func GetFdInfo(in interface{}) (uintptr, bool) { +func getFdInfo(in interface{}) (uintptr, bool) { return windowsconsole.GetHandleInfo(in) } -// GetWinsize returns the window size based on the specified file descriptor. 
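The moby/term changes above move the implementation into term_unix.go and term_windows.go behind thin exported wrappers, so the public API (GetFdInfo, MakeRaw, RestoreTerminal, GetWinsize and friends) is unchanged. A brief usage sketch against that public API; it only does something useful when stdin is a real terminal.

    package main

    import (
        "fmt"
        "os"

        "github.com/moby/term"
    )

    func main() {
        fd, isTerm := term.GetFdInfo(os.Stdin)
        if !isTerm {
            fmt.Println("stdin is not a terminal")
            return
        }

        // Save the current state, switch to raw mode, and always restore.
        state, err := term.MakeRaw(fd)
        if err != nil {
            fmt.Println("MakeRaw:", err)
            return
        }
        defer term.RestoreTerminal(fd, state)

        if ws, err := term.GetWinsize(fd); err == nil {
            // In raw mode output is not cooked, so end lines with \r\n.
            fmt.Printf("terminal is %d columns x %d rows\r\n", ws.Width, ws.Height)
        }
    }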
-func GetWinsize(fd uintptr) (*Winsize, error) { +func getWinsize(fd uintptr) (*Winsize, error) { var info windows.ConsoleScreenBufferInfo if err := windows.GetConsoleScreenBufferInfo(windows.Handle(fd), &info); err != nil { return nil, err @@ -110,21 +102,21 @@ func GetWinsize(fd uintptr) (*Winsize, error) { return winsize, nil } -// IsTerminal returns true if the given file descriptor is a terminal. -func IsTerminal(fd uintptr) bool { +func setWinsize(fd uintptr, ws *Winsize) error { + return fmt.Errorf("not implemented on Windows") +} + +func isTerminal(fd uintptr) bool { var mode uint32 err := windows.GetConsoleMode(windows.Handle(fd), &mode) return err == nil } -// RestoreTerminal restores the terminal connected to the given file descriptor -// to a previous state. -func RestoreTerminal(fd uintptr, state *State) error { +func restoreTerminal(fd uintptr, state *State) error { return windows.SetConsoleMode(windows.Handle(fd), state.mode) } -// SaveState saves the state of the terminal connected to the given file descriptor. -func SaveState(fd uintptr) (*State, error) { +func saveState(fd uintptr) (*State, error) { var mode uint32 if err := windows.GetConsoleMode(windows.Handle(fd), &mode); err != nil { @@ -134,9 +126,8 @@ func SaveState(fd uintptr) (*State, error) { return &State{mode: mode}, nil } -// DisableEcho disables echo for the terminal connected to the given file descriptor. -// -- See https://msdn.microsoft.com/en-us/library/windows/desktop/ms683462(v=vs.85).aspx -func DisableEcho(fd uintptr, state *State) error { +func disableEcho(fd uintptr, state *State) error { + // See https://msdn.microsoft.com/en-us/library/windows/desktop/ms683462(v=vs.85).aspx mode := state.mode mode &^= windows.ENABLE_ECHO_INPUT mode |= windows.ENABLE_PROCESSED_INPUT | windows.ENABLE_LINE_INPUT @@ -150,69 +141,27 @@ func DisableEcho(fd uintptr, state *State) error { return nil } -// SetRawTerminal puts the terminal connected to the given file descriptor into -// raw mode and returns the previous state. On UNIX, this puts both the input -// and output into raw mode. On Windows, it only puts the input into raw mode. -func SetRawTerminal(fd uintptr) (*State, error) { - state, err := MakeRaw(fd) +func setRawTerminal(fd uintptr) (*State, error) { + oldState, err := MakeRaw(fd) if err != nil { return nil, err } // Register an interrupt handler to catch and restore prior state - restoreAtInterrupt(fd, state) - return state, err + restoreAtInterrupt(fd, oldState) + return oldState, err } -// SetRawTerminalOutput puts the output of terminal connected to the given file -// descriptor into raw mode. On UNIX, this does nothing and returns nil for the -// state. On Windows, it disables LF -> CRLF translation. -func SetRawTerminalOutput(fd uintptr) (*State, error) { - state, err := SaveState(fd) +func setRawTerminalOutput(fd uintptr) (*State, error) { + oldState, err := saveState(fd) if err != nil { return nil, err } // Ignore failures, since winterm.DISABLE_NEWLINE_AUTO_RETURN might not be supported on this // version of Windows. - _ = windows.SetConsoleMode(windows.Handle(fd), state.mode|windows.DISABLE_NEWLINE_AUTO_RETURN) - return state, err -} - -// MakeRaw puts the terminal (Windows Console) connected to the given file descriptor into raw -// mode and returns the previous state of the terminal so that it can be restored. 
-func MakeRaw(fd uintptr) (*State, error) { - state, err := SaveState(fd) - if err != nil { - return nil, err - } - - mode := state.mode - - // See - // -- https://msdn.microsoft.com/en-us/library/windows/desktop/ms686033(v=vs.85).aspx - // -- https://msdn.microsoft.com/en-us/library/windows/desktop/ms683462(v=vs.85).aspx - - // Disable these modes - mode &^= windows.ENABLE_ECHO_INPUT - mode &^= windows.ENABLE_LINE_INPUT - mode &^= windows.ENABLE_MOUSE_INPUT - mode &^= windows.ENABLE_WINDOW_INPUT - mode &^= windows.ENABLE_PROCESSED_INPUT - - // Enable these modes - mode |= windows.ENABLE_EXTENDED_FLAGS - mode |= windows.ENABLE_INSERT_MODE - mode |= windows.ENABLE_QUICK_EDIT_MODE - if vtInputSupported { - mode |= windows.ENABLE_VIRTUAL_TERMINAL_INPUT - } - - err = windows.SetConsoleMode(windows.Handle(fd), mode) - if err != nil { - return nil, err - } - return state, nil + _ = windows.SetConsoleMode(windows.Handle(fd), oldState.mode|windows.DISABLE_NEWLINE_AUTO_RETURN) + return oldState, err } func restoreAtInterrupt(fd uintptr, state *State) { diff --git a/vendor/github.com/moby/term/termios.go b/vendor/github.com/moby/term/termios_unix.go similarity index 50% rename from vendor/github.com/moby/term/termios.go rename to vendor/github.com/moby/term/termios_unix.go index 99c0f7de6..60c823783 100644 --- a/vendor/github.com/moby/term/termios.go +++ b/vendor/github.com/moby/term/termios_unix.go @@ -8,12 +8,11 @@ import ( ) // Termios is the Unix API for terminal I/O. +// +// Deprecated: use [unix.Termios]. type Termios = unix.Termios -// MakeRaw puts the terminal connected to the given file descriptor into raw -// mode and returns the previous state of the terminal so that it can be -// restored. -func MakeRaw(fd uintptr) (*State, error) { +func makeRaw(fd uintptr) (*State, error) { termios, err := tcget(fd) if err != nil { return nil, err @@ -21,10 +20,10 @@ func MakeRaw(fd uintptr) (*State, error) { oldState := State{termios: *termios} - termios.Iflag &^= (unix.IGNBRK | unix.BRKINT | unix.PARMRK | unix.ISTRIP | unix.INLCR | unix.IGNCR | unix.ICRNL | unix.IXON) + termios.Iflag &^= unix.IGNBRK | unix.BRKINT | unix.PARMRK | unix.ISTRIP | unix.INLCR | unix.IGNCR | unix.ICRNL | unix.IXON termios.Oflag &^= unix.OPOST - termios.Lflag &^= (unix.ECHO | unix.ECHONL | unix.ICANON | unix.ISIG | unix.IEXTEN) - termios.Cflag &^= (unix.CSIZE | unix.PARENB) + termios.Lflag &^= unix.ECHO | unix.ECHONL | unix.ICANON | unix.ISIG | unix.IEXTEN + termios.Cflag &^= unix.CSIZE | unix.PARENB termios.Cflag |= unix.CS8 termios.Cc[unix.VMIN] = 1 termios.Cc[unix.VTIME] = 0 diff --git a/vendor/github.com/moby/term/termios_windows.go b/vendor/github.com/moby/term/termios_windows.go new file mode 100644 index 000000000..5be4e7601 --- /dev/null +++ b/vendor/github.com/moby/term/termios_windows.go @@ -0,0 +1,37 @@ +package term + +import "golang.org/x/sys/windows" + +func makeRaw(fd uintptr) (*State, error) { + state, err := SaveState(fd) + if err != nil { + return nil, err + } + + mode := state.mode + + // See + // -- https://msdn.microsoft.com/en-us/library/windows/desktop/ms686033(v=vs.85).aspx + // -- https://msdn.microsoft.com/en-us/library/windows/desktop/ms683462(v=vs.85).aspx + + // Disable these modes + mode &^= windows.ENABLE_ECHO_INPUT + mode &^= windows.ENABLE_LINE_INPUT + mode &^= windows.ENABLE_MOUSE_INPUT + mode &^= windows.ENABLE_WINDOW_INPUT + mode &^= windows.ENABLE_PROCESSED_INPUT + + // Enable these modes + mode |= windows.ENABLE_EXTENDED_FLAGS + mode |= windows.ENABLE_INSERT_MODE + mode |= 
windows.ENABLE_QUICK_EDIT_MODE + if vtInputSupported { + mode |= windows.ENABLE_VIRTUAL_TERMINAL_INPUT + } + + err = windows.SetConsoleMode(windows.Handle(fd), mode) + if err != nil { + return nil, err + } + return state, nil +} diff --git a/vendor/github.com/moby/term/windows/ansi_reader.go b/vendor/github.com/moby/term/windows/ansi_reader.go index f32aa537e..fb34c547a 100644 --- a/vendor/github.com/moby/term/windows/ansi_reader.go +++ b/vendor/github.com/moby/term/windows/ansi_reader.go @@ -195,10 +195,10 @@ func keyToString(keyEvent *winterm.KEY_EVENT_RECORD, escapeSequence []byte) stri // +Key generates ESC N Key if !control && alt { - return ansiterm.KEY_ESC_N + strings.ToLower(string(keyEvent.UnicodeChar)) + return ansiterm.KEY_ESC_N + strings.ToLower(string(rune(keyEvent.UnicodeChar))) } - return string(keyEvent.UnicodeChar) + return string(rune(keyEvent.UnicodeChar)) } // formatVirtualKey converts a virtual key (e.g., up arrow) into the appropriate ANSI string. diff --git a/vendor/github.com/moby/term/windows/console.go b/vendor/github.com/moby/term/windows/console.go index 116b74e8f..21e57bd52 100644 --- a/vendor/github.com/moby/term/windows/console.go +++ b/vendor/github.com/moby/term/windows/console.go @@ -30,8 +30,11 @@ func GetHandleInfo(in interface{}) (uintptr, bool) { // IsConsole returns true if the given file descriptor is a Windows Console. // The code assumes that GetConsoleMode will return an error for file descriptors that are not a console. -// Deprecated: use golang.org/x/sys/windows.GetConsoleMode() or golang.org/x/term.IsTerminal() -var IsConsole = isConsole +// +// Deprecated: use [windows.GetConsoleMode] or [golang.org/x/term.IsTerminal]. +func IsConsole(fd uintptr) bool { + return isConsole(fd) +} func isConsole(fd uintptr) bool { var mode uint32 diff --git a/vendor/github.com/moby/term/winsize.go b/vendor/github.com/moby/term/winsize.go deleted file mode 100644 index bea8d4595..000000000 --- a/vendor/github.com/moby/term/winsize.go +++ /dev/null @@ -1,21 +0,0 @@ -//go:build !windows -// +build !windows - -package term - -import ( - "golang.org/x/sys/unix" -) - -// GetWinsize returns the window size based on the specified file descriptor. -func GetWinsize(fd uintptr) (*Winsize, error) { - uws, err := unix.IoctlGetWinsize(int(fd), unix.TIOCGWINSZ) - ws := &Winsize{Height: uws.Row, Width: uws.Col, x: uws.Xpixel, y: uws.Ypixel} - return ws, err -} - -// SetWinsize tries to set the specified window size for the specified file descriptor. -func SetWinsize(fd uintptr, ws *Winsize) error { - uws := &unix.Winsize{Row: ws.Height, Col: ws.Width, Xpixel: ws.x, Ypixel: ws.y} - return unix.IoctlSetWinsize(int(fd), unix.TIOCSWINSZ, uws) -} diff --git a/vendor/github.com/onsi/ginkgo/v2/.gitignore b/vendor/github.com/onsi/ginkgo/v2/.gitignore index edf0231cd..18793c248 100644 --- a/vendor/github.com/onsi/ginkgo/v2/.gitignore +++ b/vendor/github.com/onsi/ginkgo/v2/.gitignore @@ -1,5 +1,5 @@ .DS_Store -TODO.md +TODO tmp/**/* *.coverprofile .vscode diff --git a/vendor/github.com/onsi/ginkgo/v2/CHANGELOG.md b/vendor/github.com/onsi/ginkgo/v2/CHANGELOG.md index 5e779fe64..cb72bd6f2 100644 --- a/vendor/github.com/onsi/ginkgo/v2/CHANGELOG.md +++ b/vendor/github.com/onsi/ginkgo/v2/CHANGELOG.md @@ -1,3 +1,86 @@ +## 2.11.0 + +In prior versions of Ginkgo specs the CLI filter flags (e.g. `--focus`, `--label-filter`) would _override_ any programmatic focus. 
This behavior has proved surprising and confusing in at least the following ways: + +- users cannot combine programmatic filters and CLI filters to more efficiently select subsets of tests +- CLI filters can override programmatic focus on CI systems resulting in an exit code of 0 despite the presence of (incorrectly!) committed focused specs. + +Going forward Ginkgo will AND all programmatic and CLI filters. Moreover, the presence of any programmatic focused tests will always result in a non-zero exit code. + +This change is technically a change in Ginkgo's external contract and may require some users to make changes to successfully adopt. Specifically: it's possible some users were intentionally using CLI filters to override programmatic focus. If this is you please open an issue so we can explore solutions to the underlying problem you are trying to solve. + +### Fixes +- Programmatic focus is no longer overwrriten by CLI filters [d6bba86] + +### Maintenance +- Bump github.com/onsi/gomega from 1.27.7 to 1.27.8 (#1218) [4a70a38] +- Bump golang.org/x/sys from 0.8.0 to 0.9.0 (#1219) [97eda4d] + +## 2.10.0 + +### Features +- feat(ginkgo/generators): add --tags flag (#1216) [a782a77] + adds a new --tags flag to ginkgo generate + +### Fixes +- Fix broken link of MIGRATING_TO_V2.md (#1217) [548d78e] + +### Maintenance +- Bump golang.org/x/tools from 0.9.1 to 0.9.3 (#1215) [2b76a5e] + +## 2.9.7 + +### Fixes +- fix race when multiple defercleanups are called in goroutines [07fc3a0] + +## 2.9.6 + +### Fixes +- fix: create parent directory before report files (#1212) [0ac65de] + +### Maintenance +- Bump github.com/onsi/gomega from 1.27.6 to 1.27.7 (#1202) [3e39231] + +## 2.9.5 + +### Fixes +- ensure the correct deterministic sort order is produced when ordered specs are generated by a helper function [7fa0b6b] + +### Maintenance +- fix generators link (#1200) [9f9d8b9] +- Bump golang.org/x/tools from 0.8.0 to 0.9.1 (#1196) [150e3f2] +- fix spelling err in docs (#1199) [0013b1a] +- Bump golang.org/x/sys from 0.7.0 to 0.8.0 (#1193) [9e9e3e5] + +## 2.9.4 + +### Fixes +- fix hang with ginkgo -p (#1192) [15d4bdc] - this addresses a _long_ standing issue related to Ginkgo hanging when a child process spawned by the test does not exit. + +- fix: fail fast may cause Serial spec or cleanup Node interrupted (#1178) [8dea88b] - prior to this there was a small gap in which specs on other processes might start even if one process has tried to abort the suite. + + +### Maintenance +- Document run order when multiple setup nodes are at the same nesting level [903be81] + +## 2.9.3 + +### Features +- Add RenderTimeline to GinkgoT() [c0c77b6] + +### Fixes +- update Measure deprecation message. 
fixes #1176 [227c662] +- add newlines to GinkgoLogr (#1170) (#1171) [0de0e7c] + +### Maintenance +- Bump commonmarker from 0.23.8 to 0.23.9 in /docs (#1183) [8b925ab] +- Bump nokogiri from 1.14.1 to 1.14.3 in /docs (#1184) [e3795a4] +- Bump golang.org/x/tools from 0.7.0 to 0.8.0 (#1182) [b453793] +- Bump actions/setup-go from 3 to 4 (#1164) [73ed75b] +- Bump github.com/onsi/gomega from 1.27.4 to 1.27.6 (#1173) [0a2bc64] +- Bump github.com/go-logr/logr from 1.2.3 to 1.2.4 (#1174) [f41c557] +- Bump golang.org/x/sys from 0.6.0 to 0.7.0 (#1179) [8e423e5] + ## 2.9.2 ### Maintenance diff --git a/vendor/github.com/onsi/ginkgo/v2/ginkgo/generators/generate_command.go b/vendor/github.com/onsi/ginkgo/v2/ginkgo/generators/generate_command.go index 48d23f919..be01dec97 100644 --- a/vendor/github.com/onsi/ginkgo/v2/ginkgo/generators/generate_command.go +++ b/vendor/github.com/onsi/ginkgo/v2/ginkgo/generators/generate_command.go @@ -32,6 +32,9 @@ func BuildGenerateCommand() command.Command { {Name: "template-data", KeyPath: "CustomTemplateData", UsageArgument: "template-data-file", Usage: "If specified, generate will use the contents of the file passed as data to be rendered in the test file template"}, + {Name: "tags", KeyPath: "Tags", + UsageArgument: "build-tags", + Usage: "If specified, generate will create a test file that uses the given build tags (i.e. `--tags e2e,!unit` will add `//go:build e2e,!unit`)"}, }, &conf, types.GinkgoFlagSections{}, @@ -59,6 +62,7 @@ You can also pass a of the form "file.go" and generate will emit "fil } type specData struct { + BuildTags string Package string Subject string PackageImportPath string @@ -93,6 +97,7 @@ func generateTestFileForSubject(subject string, conf GeneratorsConfig) { } data := specData{ + BuildTags: getBuildTags(conf.Tags), Package: determinePackageName(packageName, conf.Internal), Subject: formattedName, PackageImportPath: getPackageImportPath(), diff --git a/vendor/github.com/onsi/ginkgo/v2/ginkgo/generators/generate_templates.go b/vendor/github.com/onsi/ginkgo/v2/ginkgo/generators/generate_templates.go index c3470adbf..4dab07d03 100644 --- a/vendor/github.com/onsi/ginkgo/v2/ginkgo/generators/generate_templates.go +++ b/vendor/github.com/onsi/ginkgo/v2/ginkgo/generators/generate_templates.go @@ -1,6 +1,7 @@ package generators -var specText = `package {{.Package}} +var specText = `{{.BuildTags}} +package {{.Package}} import ( {{.GinkgoImport}} @@ -14,7 +15,8 @@ var _ = {{.GinkgoPackage}}Describe("{{.Subject}}", func() { }) ` -var agoutiSpecText = `package {{.Package}} +var agoutiSpecText = `{{.BuildTags}} +package {{.Package}} import ( {{.GinkgoImport}} diff --git a/vendor/github.com/onsi/ginkgo/v2/ginkgo/generators/generators_common.go b/vendor/github.com/onsi/ginkgo/v2/ginkgo/generators/generators_common.go index 3046a4487..28c7aa6f4 100644 --- a/vendor/github.com/onsi/ginkgo/v2/ginkgo/generators/generators_common.go +++ b/vendor/github.com/onsi/ginkgo/v2/ginkgo/generators/generators_common.go @@ -1,6 +1,7 @@ package generators import ( + "fmt" "go/build" "os" "path/filepath" @@ -14,6 +15,7 @@ type GeneratorsConfig struct { Agouti, NoDot, Internal bool CustomTemplate string CustomTemplateData string + Tags string } func getPackageAndFormattedName() (string, string, string) { @@ -62,3 +64,13 @@ func determinePackageName(name string, internal bool) string { return name + "_test" } + +// getBuildTags returns the resultant string to be added. 
+// If the input string is not empty, then returns a `//go:build {}` string, +// otherwise returns an empty string. +func getBuildTags(tags string) string { + if tags != "" { + return fmt.Sprintf("//go:build %s\n", tags) + } + return "" +} diff --git a/vendor/github.com/onsi/ginkgo/v2/ginkgo_t_dsl.go b/vendor/github.com/onsi/ginkgo/v2/ginkgo_t_dsl.go index 17073145f..28447ffdd 100644 --- a/vendor/github.com/onsi/ginkgo/v2/ginkgo_t_dsl.go +++ b/vendor/github.com/onsi/ginkgo/v2/ginkgo_t_dsl.go @@ -80,6 +80,9 @@ type FullGinkgoTInterface interface { Fi(indentation uint, format string, args ...any) string Fiw(indentation uint, maxWidth uint, format string, args ...any) string + //Generates a formatted string version of the current spec's timeline + RenderTimeline() string + GinkgoRecover() DeferCleanup(args ...any) diff --git a/vendor/github.com/onsi/ginkgo/v2/internal/focus.go b/vendor/github.com/onsi/ginkgo/v2/internal/focus.go index 966ea0c1a..e3da7d14d 100644 --- a/vendor/github.com/onsi/ginkgo/v2/internal/focus.go +++ b/vendor/github.com/onsi/ginkgo/v2/internal/focus.go @@ -8,22 +8,22 @@ import ( ) /* - If a container marked as focus has a descendant that is also marked as focus, Ginkgo's policy is to - unmark the container's focus. This gives developers a more intuitive experience when debugging specs. - It is common to focus a container to just run a subset of specs, then identify the specific specs within the container to focus - - this policy allows the developer to simply focus those specific specs and not need to go back and turn the focus off of the container: - - As a common example, consider: - - FDescribe("something to debug", function() { - It("works", function() {...}) - It("works", function() {...}) - FIt("doesn't work", function() {...}) - It("works", function() {...}) - }) - - here the developer's intent is to focus in on the `"doesn't work"` spec and not to run the adjacent specs in the focused `"something to debug"` container. - The nested policy applied by this function enables this behavior. +If a container marked as focus has a descendant that is also marked as focus, Ginkgo's policy is to +unmark the container's focus. This gives developers a more intuitive experience when debugging specs. +It is common to focus a container to just run a subset of specs, then identify the specific specs within the container to focus - +this policy allows the developer to simply focus those specific specs and not need to go back and turn the focus off of the container: + +As a common example, consider: + + FDescribe("something to debug", function() { + It("works", function() {...}) + It("works", function() {...}) + FIt("doesn't work", function() {...}) + It("works", function() {...}) + }) + +here the developer's intent is to focus in on the `"doesn't work"` spec and not to run the adjacent specs in the focused `"something to debug"` container. +The nested policy applied by this function enables this behavior. */ func ApplyNestedFocusPolicyToTree(tree *TreeNode) { var walkTree func(tree *TreeNode) bool @@ -44,46 +44,43 @@ func ApplyNestedFocusPolicyToTree(tree *TreeNode) { } /* - Ginkgo supports focussing specs using `FIt`, `FDescribe`, etc. - this is called "programmatic focus" - It also supports focussing specs using regular expressions on the command line (`-focus=`, `-skip=`) that match against spec text - and file filters (`-focus-files=`, `-skip-files=`) that match against code locations for nodes in specs. +Ginkgo supports focussing specs using `FIt`, `FDescribe`, etc. 
- this is called "programmatic focus" +It also supports focussing specs using regular expressions on the command line (`-focus=`, `-skip=`) that match against spec text and file filters (`-focus-files=`, `-skip-files=`) that match against code locations for nodes in specs. - If any of the CLI flags are provided they take precedence. The file filters run first followed by the regex filters. +When both programmatic and file filters are provided their results are ANDed together. If multiple kinds of filters are provided, the file filters run first followed by the regex filters. - This function sets the `Skip` property on specs by applying Ginkgo's focus policy: - - If there are no CLI arguments and no programmatic focus, do nothing. - - If there are no CLI arguments but a spec somewhere has programmatic focus, skip any specs that have no programmatic focus. - - If there are CLI arguments parse them and skip any specs that either don't match the focus filters or do match the skip filters. +This function sets the `Skip` property on specs by applying Ginkgo's focus policy: +- If there are no CLI arguments and no programmatic focus, do nothing. +- If a spec somewhere has programmatic focus skip any specs that have no programmatic focus. +- If there are CLI arguments parse them and skip any specs that either don't match the focus filters or do match the skip filters. - *Note:* specs with pending nodes are Skipped when created by NewSpec. +*Note:* specs with pending nodes are Skipped when created by NewSpec. */ func ApplyFocusToSpecs(specs Specs, description string, suiteLabels Labels, suiteConfig types.SuiteConfig) (Specs, bool) { focusString := strings.Join(suiteConfig.FocusStrings, "|") skipString := strings.Join(suiteConfig.SkipStrings, "|") - hasFocusCLIFlags := focusString != "" || skipString != "" || len(suiteConfig.SkipFiles) > 0 || len(suiteConfig.FocusFiles) > 0 || suiteConfig.LabelFilter != "" - type SkipCheck func(spec Spec) bool // by default, skip any specs marked pending skipChecks := []SkipCheck{func(spec Spec) bool { return spec.Nodes.HasNodeMarkedPending() }} hasProgrammaticFocus := false - if !hasFocusCLIFlags { - // check for programmatic focus - for _, spec := range specs { - if spec.Nodes.HasNodeMarkedFocus() && !spec.Nodes.HasNodeMarkedPending() { - skipChecks = append(skipChecks, func(spec Spec) bool { return !spec.Nodes.HasNodeMarkedFocus() }) - hasProgrammaticFocus = true - break - } + for _, spec := range specs { + if spec.Nodes.HasNodeMarkedFocus() && !spec.Nodes.HasNodeMarkedPending() { + hasProgrammaticFocus = true + break } } + if hasProgrammaticFocus { + skipChecks = append(skipChecks, func(spec Spec) bool { return !spec.Nodes.HasNodeMarkedFocus() }) + } + if suiteConfig.LabelFilter != "" { labelFilter, _ := types.ParseLabelFilter(suiteConfig.LabelFilter) - skipChecks = append(skipChecks, func(spec Spec) bool { - return !labelFilter(UnionOfLabels(suiteLabels, spec.Nodes.UnionOfLabels())) + skipChecks = append(skipChecks, func(spec Spec) bool { + return !labelFilter(UnionOfLabels(suiteLabels, spec.Nodes.UnionOfLabels())) }) } diff --git a/vendor/github.com/onsi/ginkgo/v2/internal/interrupt_handler/interrupt_handler.go b/vendor/github.com/onsi/ginkgo/v2/internal/interrupt_handler/interrupt_handler.go index ac6f51040..8ed86111f 100644 --- a/vendor/github.com/onsi/ginkgo/v2/internal/interrupt_handler/interrupt_handler.go +++ b/vendor/github.com/onsi/ginkgo/v2/internal/interrupt_handler/interrupt_handler.go @@ -10,7 +10,7 @@ import ( 
"github.com/onsi/ginkgo/v2/internal/parallel_support" ) -const ABORT_POLLING_INTERVAL = 500 * time.Millisecond +var ABORT_POLLING_INTERVAL = 500 * time.Millisecond type InterruptCause uint @@ -62,13 +62,14 @@ type InterruptHandlerInterface interface { } type InterruptHandler struct { - c chan interface{} - lock *sync.Mutex - level InterruptLevel - cause InterruptCause - client parallel_support.Client - stop chan interface{} - signals []os.Signal + c chan interface{} + lock *sync.Mutex + level InterruptLevel + cause InterruptCause + client parallel_support.Client + stop chan interface{} + signals []os.Signal + requestAbortCheck chan interface{} } func NewInterruptHandler(client parallel_support.Client, signals ...os.Signal) *InterruptHandler { @@ -76,11 +77,12 @@ func NewInterruptHandler(client parallel_support.Client, signals ...os.Signal) * signals = []os.Signal{os.Interrupt, syscall.SIGTERM} } handler := &InterruptHandler{ - c: make(chan interface{}), - lock: &sync.Mutex{}, - stop: make(chan interface{}), - client: client, - signals: signals, + c: make(chan interface{}), + lock: &sync.Mutex{}, + stop: make(chan interface{}), + requestAbortCheck: make(chan interface{}), + client: client, + signals: signals, } handler.registerForInterrupts() return handler @@ -109,6 +111,12 @@ func (handler *InterruptHandler) registerForInterrupts() { pollTicker.Stop() return } + case <-handler.requestAbortCheck: + if handler.client.ShouldAbort() { + close(abortChannel) + pollTicker.Stop() + return + } case <-handler.stop: pollTicker.Stop() return @@ -152,11 +160,18 @@ func (handler *InterruptHandler) registerForInterrupts() { func (handler *InterruptHandler) Status() InterruptStatus { handler.lock.Lock() - defer handler.lock.Unlock() - - return InterruptStatus{ + status := InterruptStatus{ Level: handler.level, Channel: handler.c, Cause: handler.cause, } + handler.lock.Unlock() + + if handler.client != nil && handler.client.ShouldAbort() && !status.Interrupted() { + close(handler.requestAbortCheck) + <-status.Channel + return handler.Status() + } + + return status } diff --git a/vendor/github.com/onsi/ginkgo/v2/internal/node.go b/vendor/github.com/onsi/ginkgo/v2/internal/node.go index 0869bffb3..14c7cf54e 100644 --- a/vendor/github.com/onsi/ginkgo/v2/internal/node.go +++ b/vendor/github.com/onsi/ginkgo/v2/internal/node.go @@ -875,6 +875,15 @@ func (n Nodes) FirstNodeMarkedOrdered() Node { return Node{} } +func (n Nodes) IndexOfFirstNodeMarkedOrdered() int { + for i := range n { + if n[i].MarkedOrdered { + return i + } + } + return -1 +} + func (n Nodes) GetMaxFlakeAttempts() int { maxFlakeAttempts := 0 for i := range n { diff --git a/vendor/github.com/onsi/ginkgo/v2/internal/ordering.go b/vendor/github.com/onsi/ginkgo/v2/internal/ordering.go index 7ed43c7fd..84eea0a59 100644 --- a/vendor/github.com/onsi/ginkgo/v2/internal/ordering.go +++ b/vendor/github.com/onsi/ginkgo/v2/internal/ordering.go @@ -27,36 +27,43 @@ func (s *SortableSpecs) Swap(i, j int) { s.Indexes[i], s.Indexes[j] = s.Indexes[ func (s *SortableSpecs) Less(i, j int) bool { a, b := s.Specs[s.Indexes[i]], s.Specs[s.Indexes[j]] - firstOrderedA := a.Nodes.FirstNodeMarkedOrdered() - firstOrderedB := b.Nodes.FirstNodeMarkedOrdered() - if firstOrderedA.ID == firstOrderedB.ID && !firstOrderedA.IsZero() { - // strictly preserve order in ordered containers. 
ID will track this as IDs are generated monotonically - return a.FirstNodeWithType(types.NodeTypeIt).ID < b.FirstNodeWithType(types.NodeTypeIt).ID + aNodes, bNodes := a.Nodes.WithType(types.NodeTypesForContainerAndIt), b.Nodes.WithType(types.NodeTypesForContainerAndIt) + + firstOrderedAIdx, firstOrderedBIdx := aNodes.IndexOfFirstNodeMarkedOrdered(), bNodes.IndexOfFirstNodeMarkedOrdered() + if firstOrderedAIdx > -1 && firstOrderedBIdx > -1 && aNodes[firstOrderedAIdx].ID == bNodes[firstOrderedBIdx].ID { + // strictly preserve order within an ordered containers. ID will track this as IDs are generated monotonically + return aNodes.FirstNodeWithType(types.NodeTypeIt).ID < bNodes.FirstNodeWithType(types.NodeTypeIt).ID + } + + // if either spec is in an ordered container - only use the nodes up to the outermost ordered container + if firstOrderedAIdx > -1 { + aNodes = aNodes[:firstOrderedAIdx+1] + } + if firstOrderedBIdx > -1 { + bNodes = bNodes[:firstOrderedBIdx+1] } - aCLs := a.Nodes.WithType(types.NodeTypesForContainerAndIt).CodeLocations() - bCLs := b.Nodes.WithType(types.NodeTypesForContainerAndIt).CodeLocations() - for i := 0; i < len(aCLs) && i < len(bCLs); i++ { - aCL, bCL := aCLs[i], bCLs[i] - if aCL.FileName < bCL.FileName { - return true - } else if aCL.FileName > bCL.FileName { - return false + for i := 0; i < len(aNodes) && i < len(bNodes); i++ { + aCL, bCL := aNodes[i].CodeLocation, bNodes[i].CodeLocation + if aCL.FileName != bCL.FileName { + return aCL.FileName < bCL.FileName } - if aCL.LineNumber < bCL.LineNumber { - return true - } else if aCL.LineNumber > bCL.LineNumber { - return false + if aCL.LineNumber != bCL.LineNumber { + return aCL.LineNumber < bCL.LineNumber } } // either everything is equal or we have different lengths of CLs - if len(aCLs) < len(bCLs) { - return true - } else if len(aCLs) > len(bCLs) { - return false + if len(aNodes) != len(bNodes) { + return len(aNodes) < len(bNodes) } // ok, now we are sure everything was equal. so we use the spec text to break ties - return a.Text() < b.Text() + for i := 0; i < len(aNodes); i++ { + if aNodes[i].Text != bNodes[i].Text { + return aNodes[i].Text < bNodes[i].Text + } + } + // ok, all those texts were equal. we'll use the ID of the most deeply nested node as a last resort + return aNodes[len(aNodes)-1].ID < bNodes[len(bNodes)-1].ID } type GroupedSpecIndices []SpecIndices diff --git a/vendor/github.com/onsi/ginkgo/v2/internal/output_interceptor_unix.go b/vendor/github.com/onsi/ginkgo/v2/internal/output_interceptor_unix.go index f5ae15b8b..8a237f446 100644 --- a/vendor/github.com/onsi/ginkgo/v2/internal/output_interceptor_unix.go +++ b/vendor/github.com/onsi/ginkgo/v2/internal/output_interceptor_unix.go @@ -26,6 +26,17 @@ func (impl *dupSyscallOutputInterceptorImpl) CreateStdoutStderrClones() (*os.Fil stdoutCloneFD, _ := unix.Dup(1) stderrCloneFD, _ := unix.Dup(2) + // Important, set the fds to FD_CLOEXEC to prevent them leaking into childs + // https://github.com/onsi/ginkgo/issues/1191 + flags, err := unix.FcntlInt(uintptr(stdoutCloneFD), unix.F_GETFD, 0) + if err == nil { + unix.FcntlInt(uintptr(stdoutCloneFD), unix.F_SETFD, flags|unix.FD_CLOEXEC) + } + flags, err = unix.FcntlInt(uintptr(stderrCloneFD), unix.F_GETFD, 0) + if err == nil { + unix.FcntlInt(uintptr(stderrCloneFD), unix.F_SETFD, flags|unix.FD_CLOEXEC) + } + // And then wrap the clone file descriptors in files. 
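The output-interceptor fix above marks the duplicated stdout/stderr descriptors close-on-exec so they cannot leak into child processes spawned by a spec (the ginkgo -p hang tracked in onsi/ginkgo#1191). The same golang.org/x/sys/unix calls in a standalone, Unix-only sketch; the helper name is illustrative.

    //go:build !windows

    package main

    import (
        "fmt"

        "golang.org/x/sys/unix"
    )

    // setCloseOnExec marks a file descriptor so it is closed automatically
    // across exec, preventing it from leaking into child processes.
    func setCloseOnExec(fd int) error {
        flags, err := unix.FcntlInt(uintptr(fd), unix.F_GETFD, 0)
        if err != nil {
            return err
        }
        _, err = unix.FcntlInt(uintptr(fd), unix.F_SETFD, flags|unix.FD_CLOEXEC)
        return err
    }

    func main() {
        // Duplicate stdout the same way the interceptor clones fds 1 and 2.
        clone, err := unix.Dup(1)
        if err != nil {
            fmt.Println("dup:", err)
            return
        }
        if err := setCloseOnExec(clone); err != nil {
            fmt.Println("fcntl:", err)
            return
        }
        fmt.Println("stdout clone fd:", clone)
    }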
// One benefit of this (that we don't use yet) is that we can actually write // to these files to emit output to the console even though we're intercepting output diff --git a/vendor/github.com/onsi/ginkgo/v2/internal/suite.go b/vendor/github.com/onsi/ginkgo/v2/internal/suite.go index a1dbd4c62..ea0d259d9 100644 --- a/vendor/github.com/onsi/ginkgo/v2/internal/suite.go +++ b/vendor/github.com/onsi/ginkgo/v2/internal/suite.go @@ -245,7 +245,9 @@ func (suite *Suite) pushCleanupNode(node Node) error { node.NodeIDWhereCleanupWasGenerated = suite.currentNode.ID node.NestingLevel = suite.currentNode.NestingLevel + suite.selectiveLock.Lock() suite.cleanupNodes = append(suite.cleanupNodes, node) + suite.selectiveLock.Unlock() return nil } @@ -937,6 +939,12 @@ func (suite *Suite) runNode(node Node, specDeadline time.Time, text string) (typ gracePeriodChannel = time.After(gracePeriod) case <-interruptStatus.Channel: interruptStatus = suite.interruptHandler.Status() + // ignore interruption from other process if we are cleaning up or reporting + if interruptStatus.Cause == interrupt_handler.InterruptCauseAbortByOtherProcess && + node.NodeType.Is(types.NodeTypesAllowedDuringReportInterrupt|types.NodeTypesAllowedDuringCleanupInterrupt) { + continue + } + deadlineChannel = nil // don't worry about deadlines, time's up now failureTimelineLocation := suite.generateTimelineLocation() diff --git a/vendor/github.com/onsi/ginkgo/v2/internal/testingtproxy/testing_t_proxy.go b/vendor/github.com/onsi/ginkgo/v2/internal/testingtproxy/testing_t_proxy.go index 92acc0a00..73e265565 100644 --- a/vendor/github.com/onsi/ginkgo/v2/internal/testingtproxy/testing_t_proxy.go +++ b/vendor/github.com/onsi/ginkgo/v2/internal/testingtproxy/testing_t_proxy.go @@ -7,6 +7,7 @@ import ( "github.com/onsi/ginkgo/v2/formatter" "github.com/onsi/ginkgo/v2/internal" + "github.com/onsi/ginkgo/v2/reporters" "github.com/onsi/ginkgo/v2/types" ) @@ -185,6 +186,9 @@ func (t *ginkgoTestingTProxy) Fi(indentation uint, format string, args ...any) s func (t *ginkgoTestingTProxy) Fiw(indentation uint, maxWidth uint, format string, args ...any) string { return t.f.Fiw(indentation, maxWidth, format, args...) 
} +func (t *ginkgoTestingTProxy) RenderTimeline() string { + return reporters.RenderTimeline(t.report(), false) +} func (t *ginkgoTestingTProxy) GinkgoRecover() { t.ginkgoRecover() } diff --git a/vendor/github.com/onsi/ginkgo/v2/internal/writer.go b/vendor/github.com/onsi/ginkgo/v2/internal/writer.go index 28a45b0fa..574f172df 100644 --- a/vendor/github.com/onsi/ginkgo/v2/internal/writer.go +++ b/vendor/github.com/onsi/ginkgo/v2/internal/writer.go @@ -135,6 +135,6 @@ func (w *Writer) Println(a ...interface{}) { func GinkgoLogrFunc(writer *Writer) logr.Logger { return funcr.New(func(prefix, args string) { - writer.Printf("%s", args) + writer.Printf("%s\n", args) }, funcr.Options{}) } diff --git a/vendor/github.com/onsi/ginkgo/v2/reporters/json_report.go b/vendor/github.com/onsi/ginkgo/v2/reporters/json_report.go index 7f96c450f..be506f9b4 100644 --- a/vendor/github.com/onsi/ginkgo/v2/reporters/json_report.go +++ b/vendor/github.com/onsi/ginkgo/v2/reporters/json_report.go @@ -4,12 +4,16 @@ import ( "encoding/json" "fmt" "os" + "path" "github.com/onsi/ginkgo/v2/types" ) -//GenerateJSONReport produces a JSON-formatted report at the passed in destination +// GenerateJSONReport produces a JSON-formatted report at the passed in destination func GenerateJSONReport(report types.Report, destination string) error { + if err := os.MkdirAll(path.Dir(destination), 0770); err != nil { + return err + } f, err := os.Create(destination) if err != nil { return err @@ -25,8 +29,8 @@ func GenerateJSONReport(report types.Report, destination string) error { return f.Close() } -//MergeJSONReports produces a single JSON-formatted report at the passed in destination by merging the JSON-formatted reports provided in sources -//It skips over reports that fail to decode but reports on them via the returned messages []string +// MergeJSONReports produces a single JSON-formatted report at the passed in destination by merging the JSON-formatted reports provided in sources +// It skips over reports that fail to decode but reports on them via the returned messages []string func MergeAndCleanupJSONReports(sources []string, destination string) ([]string, error) { messages := []string{} allReports := []types.Report{} @@ -46,6 +50,9 @@ func MergeAndCleanupJSONReports(sources []string, destination string) ([]string, allReports = append(allReports, reports...) } + if err := os.MkdirAll(path.Dir(destination), 0770); err != nil { + return messages, err + } f, err := os.Create(destination) if err != nil { return messages, err diff --git a/vendor/github.com/onsi/ginkgo/v2/reporters/junit_report.go b/vendor/github.com/onsi/ginkgo/v2/reporters/junit_report.go index ca98609d0..816042208 100644 --- a/vendor/github.com/onsi/ginkgo/v2/reporters/junit_report.go +++ b/vendor/github.com/onsi/ginkgo/v2/reporters/junit_report.go @@ -14,6 +14,7 @@ import ( "encoding/xml" "fmt" "os" + "path" "strings" "github.com/onsi/ginkgo/v2/config" @@ -285,6 +286,9 @@ func GenerateJUnitReportWithConfig(report types.Report, dst string, config Junit TestSuites: []JUnitTestSuite{suite}, } + if err := os.MkdirAll(path.Dir(dst), 0770); err != nil { + return err + } f, err := os.Create(dst) if err != nil { return err @@ -322,6 +326,9 @@ func MergeAndCleanupJUnitReports(sources []string, dst string) ([]string, error) mergedReport.TestSuites = append(mergedReport.TestSuites, report.TestSuites...) 
} + if err := os.MkdirAll(path.Dir(dst), 0770); err != nil { + return messages, err + } f, err := os.Create(dst) if err != nil { return messages, err @@ -344,8 +351,12 @@ func failureDescriptionForUnstructuredReporters(spec types.SpecReport) string { } func systemErrForUnstructuredReporters(spec types.SpecReport) string { + return RenderTimeline(spec, true) +} + +func RenderTimeline(spec types.SpecReport, noColor bool) string { out := &strings.Builder{} - NewDefaultReporter(types.ReporterConfig{NoColor: true, VeryVerbose: true}, out).emitTimeline(0, spec, spec.Timeline()) + NewDefaultReporter(types.ReporterConfig{NoColor: noColor, VeryVerbose: true}, out).emitTimeline(0, spec, spec.Timeline()) return out.String() } diff --git a/vendor/github.com/onsi/ginkgo/v2/reporters/teamcity_report.go b/vendor/github.com/onsi/ginkgo/v2/reporters/teamcity_report.go index c1863496d..e990ad82e 100644 --- a/vendor/github.com/onsi/ginkgo/v2/reporters/teamcity_report.go +++ b/vendor/github.com/onsi/ginkgo/v2/reporters/teamcity_report.go @@ -11,6 +11,7 @@ package reporters import ( "fmt" "os" + "path" "strings" "github.com/onsi/ginkgo/v2/types" @@ -27,6 +28,9 @@ func tcEscape(s string) string { } func GenerateTeamcityReport(report types.Report, dst string) error { + if err := os.MkdirAll(path.Dir(dst), 0770); err != nil { + return err + } f, err := os.Create(dst) if err != nil { return err diff --git a/vendor/github.com/onsi/ginkgo/v2/types/deprecation_support.go b/vendor/github.com/onsi/ginkgo/v2/types/deprecation_support.go index f267bdefd..e2519f673 100644 --- a/vendor/github.com/onsi/ginkgo/v2/types/deprecation_support.go +++ b/vendor/github.com/onsi/ginkgo/v2/types/deprecation_support.go @@ -38,7 +38,7 @@ func (d deprecations) Async() Deprecation { func (d deprecations) Measure() Deprecation { return Deprecation{ - Message: "Measure is deprecated and will be removed in Ginkgo V2. Please migrate to gomega/gmeasure.", + Message: "Measure is deprecated and has been removed from Ginkgo V2. Any Measure tests in your spec will not run. Please migrate to gomega/gmeasure.", DocLink: "removed-measure", Version: "1.16.3", } diff --git a/vendor/github.com/onsi/ginkgo/v2/types/version.go b/vendor/github.com/onsi/ginkgo/v2/types/version.go index 8e7f7404f..f895739b8 100644 --- a/vendor/github.com/onsi/ginkgo/v2/types/version.go +++ b/vendor/github.com/onsi/ginkgo/v2/types/version.go @@ -1,3 +1,3 @@ package types -const VERSION = "2.9.2" +const VERSION = "2.11.0" diff --git a/vendor/github.com/onsi/gomega/.gitignore b/vendor/github.com/onsi/gomega/.gitignore index 52266eae1..425d0a509 100644 --- a/vendor/github.com/onsi/gomega/.gitignore +++ b/vendor/github.com/onsi/gomega/.gitignore @@ -3,5 +3,5 @@ . 
.idea gomega.iml -TODO.md +TODO .vscode \ No newline at end of file diff --git a/vendor/github.com/onsi/gomega/CHANGELOG.md b/vendor/github.com/onsi/gomega/CHANGELOG.md index ef428f6f6..9b83dd6d4 100644 --- a/vendor/github.com/onsi/gomega/CHANGELOG.md +++ b/vendor/github.com/onsi/gomega/CHANGELOG.md @@ -1,3 +1,27 @@ +## 1.27.8 + +### Fixes +- HaveExactElement should not call FailureMessage if a submatcher returned an error [096f392] + +### Maintenance +- Bump github.com/onsi/ginkgo/v2 from 2.9.5 to 2.9.7 (#669) [8884bee] + +## 1.27.7 + +### Fixes +- fix: gcustom.MakeMatcher accepts nil as actual value (#666) [57054d5] + +### Maintenance +- update gitignore [05c1bc6] +- Bump github.com/onsi/ginkgo/v2 from 2.9.4 to 2.9.5 (#663) [7cadcf6] +- Bump golang.org/x/net from 0.9.0 to 0.10.0 (#662) [b524839] +- Bump github.com/onsi/ginkgo/v2 from 2.9.2 to 2.9.4 (#661) [5f44694] +- Bump commonmarker from 0.23.8 to 0.23.9 in /docs (#657) [05dc99a] +- Bump nokogiri from 1.14.1 to 1.14.3 in /docs (#658) [3a033d1] +- Replace deprecated NewGomegaWithT with NewWithT (#659) [a19238f] +- Bump golang.org/x/net from 0.8.0 to 0.9.0 (#656) [29ed041] +- Bump actions/setup-go from 3 to 4 (#651) [11b2080] + ## 1.27.6 ### Fixes diff --git a/vendor/github.com/onsi/gomega/gomega_dsl.go b/vendor/github.com/onsi/gomega/gomega_dsl.go index 872592bfb..bc7ec293d 100644 --- a/vendor/github.com/onsi/gomega/gomega_dsl.go +++ b/vendor/github.com/onsi/gomega/gomega_dsl.go @@ -22,7 +22,7 @@ import ( "github.com/onsi/gomega/types" ) -const GOMEGA_VERSION = "1.27.6" +const GOMEGA_VERSION = "1.27.8" const nilGomegaPanic = `You are trying to make an assertion, but haven't registered Gomega's fail handler. If you're using Ginkgo then you probably forgot to put your assertion in an It(). diff --git a/vendor/github.com/onsi/gomega/matchers/have_exact_elements.go b/vendor/github.com/onsi/gomega/matchers/have_exact_elements.go index 7cce776c1..dca5b9446 100644 --- a/vendor/github.com/onsi/gomega/matchers/have_exact_elements.go +++ b/vendor/github.com/onsi/gomega/matchers/have_exact_elements.go @@ -44,7 +44,12 @@ func (matcher *HaveExactElementsMatcher) Match(actual interface{}) (success bool elemMatcher := matchers[i].(omegaMatcher) match, err := elemMatcher.Match(values[i]) - if err != nil || !match { + if err != nil { + matcher.mismatchFailures = append(matcher.mismatchFailures, mismatchFailure{ + index: i, + failure: err.Error(), + }) + } else if !match { matcher.mismatchFailures = append(matcher.mismatchFailures, mismatchFailure{ index: i, failure: elemMatcher.FailureMessage(values[i]), diff --git a/vendor/github.com/opencontainers/image-spec/specs-go/v1/annotations.go b/vendor/github.com/opencontainers/image-spec/specs-go/v1/annotations.go index 6f9e6fd3a..e62892046 100644 --- a/vendor/github.com/opencontainers/image-spec/specs-go/v1/annotations.go +++ b/vendor/github.com/opencontainers/image-spec/specs-go/v1/annotations.go @@ -65,7 +65,4 @@ const ( // AnnotationArtifactDescription is the annotation key for the human readable description for the artifact. AnnotationArtifactDescription = "org.opencontainers.artifact.description" - - // AnnotationReferrersFiltersApplied is the annotation key for the comma separated list of filters applied by the registry in the referrers listing. 
- AnnotationReferrersFiltersApplied = "org.opencontainers.referrers.filtersApplied" ) diff --git a/vendor/github.com/opencontainers/image-spec/specs-go/v1/artifact.go b/vendor/github.com/opencontainers/image-spec/specs-go/v1/artifact.go deleted file mode 100644 index 03d76ce43..000000000 --- a/vendor/github.com/opencontainers/image-spec/specs-go/v1/artifact.go +++ /dev/null @@ -1,34 +0,0 @@ -// Copyright 2022 The Linux Foundation -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package v1 - -// Artifact describes an artifact manifest. -// This structure provides `application/vnd.oci.artifact.manifest.v1+json` mediatype when marshalled to JSON. -type Artifact struct { - // MediaType is the media type of the object this schema refers to. - MediaType string `json:"mediaType"` - - // ArtifactType is the IANA media type of the artifact this schema refers to. - ArtifactType string `json:"artifactType"` - - // Blobs is a collection of blobs referenced by this manifest. - Blobs []Descriptor `json:"blobs,omitempty"` - - // Subject (reference) is an optional link from the artifact to another manifest forming an association between the artifact and the other manifest. - Subject *Descriptor `json:"subject,omitempty"` - - // Annotations contains arbitrary metadata for the artifact manifest. - Annotations map[string]string `json:"annotations,omitempty"` -} diff --git a/vendor/github.com/opencontainers/image-spec/specs-go/v1/config.go b/vendor/github.com/opencontainers/image-spec/specs-go/v1/config.go index e6aa113f0..36b0aeb8f 100644 --- a/vendor/github.com/opencontainers/image-spec/specs-go/v1/config.go +++ b/vendor/github.com/opencontainers/image-spec/specs-go/v1/config.go @@ -49,13 +49,15 @@ type ImageConfig struct { // StopSignal contains the system call signal that will be sent to the container to exit. StopSignal string `json:"StopSignal,omitempty"` - // ArgsEscaped `[Deprecated]` - This field is present only for legacy - // compatibility with Docker and should not be used by new image builders. - // It is used by Docker for Windows images to indicate that the `Entrypoint` - // or `Cmd` or both, contains only a single element array, that is a - // pre-escaped, and combined into a single string `CommandLine`. If `true` - // the value in `Entrypoint` or `Cmd` should be used as-is to avoid double - // escaping. + // ArgsEscaped + // + // Deprecated: This field is present only for legacy compatibility with + // Docker and should not be used by new image builders. It is used by Docker + // for Windows images to indicate that the `Entrypoint` or `Cmd` or both, + // contains only a single element array, that is a pre-escaped, and combined + // into a single string `CommandLine`. If `true` the value in `Entrypoint` or + // `Cmd` should be used as-is to avoid double escaping. 
+ // https://github.com/opencontainers/image-spec/pull/892 ArgsEscaped bool `json:"ArgsEscaped,omitempty"` } @@ -95,22 +97,8 @@ type Image struct { // Author defines the name and/or email address of the person or entity which created and is responsible for maintaining the image. Author string `json:"author,omitempty"` - // Architecture is the CPU architecture which the binaries in this image are built to run on. - Architecture string `json:"architecture"` - - // Variant is the variant of the specified CPU architecture which image binaries are intended to run on. - Variant string `json:"variant,omitempty"` - - // OS is the name of the operating system which the image is built to run on. - OS string `json:"os"` - - // OSVersion is an optional field specifying the operating system - // version, for example on Windows `10.0.14393.1066`. - OSVersion string `json:"os.version,omitempty"` - - // OSFeatures is an optional field specifying an array of strings, - // each listing a required OS feature (for example on Windows `win32k`). - OSFeatures []string `json:"os.features,omitempty"` + // Platform describes the platform which the image in the manifest runs on. + Platform // Config defines the execution parameters which should be used as a base when running a container using the image. Config ImageConfig `json:"config,omitempty"` diff --git a/vendor/github.com/opencontainers/image-spec/specs-go/v1/manifest.go b/vendor/github.com/opencontainers/image-spec/specs-go/v1/manifest.go index 730a09359..4ce7b54cc 100644 --- a/vendor/github.com/opencontainers/image-spec/specs-go/v1/manifest.go +++ b/vendor/github.com/opencontainers/image-spec/specs-go/v1/manifest.go @@ -23,6 +23,9 @@ type Manifest struct { // MediaType specifies the type of this document data structure e.g. `application/vnd.oci.image.manifest.v1+json` MediaType string `json:"mediaType,omitempty"` + // ArtifactType specifies the IANA media type of artifact when the manifest is used for an artifact. + ArtifactType string `json:"artifactType,omitempty"` + // Config references a configuration object for a container, by digest. // The referenced configuration object is a JSON blob that the runtime uses to set up the container. Config Descriptor `json:"config"` @@ -36,3 +39,11 @@ type Manifest struct { // Annotations contains arbitrary metadata for the image manifest. Annotations map[string]string `json:"annotations,omitempty"` } + +// ScratchDescriptor is the descriptor of a blob with content of `{}`. +var ScratchDescriptor = Descriptor{ + MediaType: MediaTypeScratch, + Digest: `sha256:44136fa355b3678a1146ad16f7e8649e94fb4fc21fe77e8310c060f61caaff8a`, + Size: 2, + Data: []byte(`{}`), +} diff --git a/vendor/github.com/opencontainers/image-spec/specs-go/v1/mediatype.go b/vendor/github.com/opencontainers/image-spec/specs-go/v1/mediatype.go index 935b481e3..5dd31255e 100644 --- a/vendor/github.com/opencontainers/image-spec/specs-go/v1/mediatype.go +++ b/vendor/github.com/opencontainers/image-spec/specs-go/v1/mediatype.go @@ -40,21 +40,36 @@ const ( // MediaTypeImageLayerNonDistributable is the media type for layers referenced by // the manifest but with distribution restrictions. + // + // Deprecated: Non-distributable layers are deprecated, and not recommended + // for future use. Implementations SHOULD NOT produce new non-distributable + // layers. 
+ // https://github.com/opencontainers/image-spec/pull/965 MediaTypeImageLayerNonDistributable = "application/vnd.oci.image.layer.nondistributable.v1.tar" // MediaTypeImageLayerNonDistributableGzip is the media type for // gzipped layers referenced by the manifest but with distribution // restrictions. + // + // Deprecated: Non-distributable layers are deprecated, and not recommended + // for future use. Implementations SHOULD NOT produce new non-distributable + // layers. + // https://github.com/opencontainers/image-spec/pull/965 MediaTypeImageLayerNonDistributableGzip = "application/vnd.oci.image.layer.nondistributable.v1.tar+gzip" // MediaTypeImageLayerNonDistributableZstd is the media type for zstd // compressed layers referenced by the manifest but with distribution // restrictions. + // + // Deprecated: Non-distributable layers are deprecated, and not recommended + // for future use. Implementations SHOULD NOT produce new non-distributable + // layers. + // https://github.com/opencontainers/image-spec/pull/965 MediaTypeImageLayerNonDistributableZstd = "application/vnd.oci.image.layer.nondistributable.v1.tar+zstd" // MediaTypeImageConfig specifies the media type for the image configuration. MediaTypeImageConfig = "application/vnd.oci.image.config.v1+json" - // MediaTypeArtifactManifest specifies the media type for a content descriptor. - MediaTypeArtifactManifest = "application/vnd.oci.artifact.manifest.v1+json" + // MediaTypeScratch specifies the media type for an unused blob containing the value `{}` + MediaTypeScratch = "application/vnd.oci.scratch.v1+json" ) diff --git a/vendor/github.com/opencontainers/image-spec/specs-go/version.go b/vendor/github.com/opencontainers/image-spec/specs-go/version.go index 1afd590fe..3d4119b44 100644 --- a/vendor/github.com/opencontainers/image-spec/specs-go/version.go +++ b/vendor/github.com/opencontainers/image-spec/specs-go/version.go @@ -25,7 +25,7 @@ const ( VersionPatch = 0 // VersionDev indicates development branch. Releases will be empty string. - VersionDev = "-dev" + VersionDev = "-rc.3" ) // Version is the specification version that the package types support. 
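Reviewer note (not part of the vendored diff): the image-spec hunks above drop the artifact manifest (artifact.go, MediaTypeArtifactManifest), embed Platform directly in Image so architecture/OS become promoted fields, add an optional ArtifactType to Manifest, and introduce ScratchDescriptor for an empty `{}` blob. A minimal sketch of how a consumer might use these fields at the -rc.3 level vendored here; v1.Platform's field names and v1.MediaTypeImageManifest are not shown in the hunks above (they come from the same package), and the example artifact type string is purely illustrative:

    package main

    import (
        "encoding/json"
        "fmt"

        v1 "github.com/opencontainers/image-spec/specs-go/v1"
    )

    func main() {
        // Image now embeds Platform, so Architecture/OS are set via the embedded struct
        // instead of the former top-level fields.
        img := v1.Image{
            Platform: v1.Platform{
                Architecture: "amd64",
                OS:           "linux",
            },
            Author: "example",
        }

        // Manifest gains an optional ArtifactType; ScratchDescriptor describes the `{}` blob
        // and can stand in for a config when the manifest carries an artifact.
        m := v1.Manifest{
            MediaType:    v1.MediaTypeImageManifest,
            ArtifactType: "application/vnd.example.artifact.v1+json", // illustrative only
            Config:       v1.ScratchDescriptor,
        }

        b, _ := json.Marshal(img)
        fmt.Println(string(b))
        b, _ = json.Marshal(m)
        fmt.Println(string(b))
    }

This is only a sketch to show the shape of the new API surface; callers migrating from the removed Artifact type would follow the guidance in the image-spec release notes rather than this snippet.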
diff --git a/vendor/github.com/sigstore/fulcio/pkg/certificate/extensions.go b/vendor/github.com/sigstore/fulcio/pkg/certificate/extensions.go index 3ffbcc91f..53d4d6626 100644 --- a/vendor/github.com/sigstore/fulcio/pkg/certificate/extensions.go +++ b/vendor/github.com/sigstore/fulcio/pkg/certificate/extensions.go @@ -344,59 +344,59 @@ func parseExtensions(ext []pkix.Extension) (Extensions, error) { out.GithubWorkflowRef = string(e.Value) // END: Deprecated case e.Id.Equal(OIDIssuerV2): - if err := parseDERString(e.Value, &out.Issuer); err != nil { + if err := ParseDERString(e.Value, &out.Issuer); err != nil { return Extensions{}, err } case e.Id.Equal(OIDBuildSignerURI): - if err := parseDERString(e.Value, &out.BuildSignerURI); err != nil { + if err := ParseDERString(e.Value, &out.BuildSignerURI); err != nil { return Extensions{}, err } case e.Id.Equal(OIDBuildSignerDigest): - if err := parseDERString(e.Value, &out.BuildSignerDigest); err != nil { + if err := ParseDERString(e.Value, &out.BuildSignerDigest); err != nil { return Extensions{}, err } case e.Id.Equal(OIDRunnerEnvironment): - if err := parseDERString(e.Value, &out.RunnerEnvironment); err != nil { + if err := ParseDERString(e.Value, &out.RunnerEnvironment); err != nil { return Extensions{}, err } case e.Id.Equal(OIDSourceRepositoryURI): - if err := parseDERString(e.Value, &out.SourceRepositoryURI); err != nil { + if err := ParseDERString(e.Value, &out.SourceRepositoryURI); err != nil { return Extensions{}, err } case e.Id.Equal(OIDSourceRepositoryDigest): - if err := parseDERString(e.Value, &out.SourceRepositoryDigest); err != nil { + if err := ParseDERString(e.Value, &out.SourceRepositoryDigest); err != nil { return Extensions{}, err } case e.Id.Equal(OIDSourceRepositoryRef): - if err := parseDERString(e.Value, &out.SourceRepositoryRef); err != nil { + if err := ParseDERString(e.Value, &out.SourceRepositoryRef); err != nil { return Extensions{}, err } case e.Id.Equal(OIDSourceRepositoryIdentifier): - if err := parseDERString(e.Value, &out.SourceRepositoryIdentifier); err != nil { + if err := ParseDERString(e.Value, &out.SourceRepositoryIdentifier); err != nil { return Extensions{}, err } case e.Id.Equal(OIDSourceRepositoryOwnerURI): - if err := parseDERString(e.Value, &out.SourceRepositoryOwnerURI); err != nil { + if err := ParseDERString(e.Value, &out.SourceRepositoryOwnerURI); err != nil { return Extensions{}, err } case e.Id.Equal(OIDSourceRepositoryOwnerIdentifier): - if err := parseDERString(e.Value, &out.SourceRepositoryOwnerIdentifier); err != nil { + if err := ParseDERString(e.Value, &out.SourceRepositoryOwnerIdentifier); err != nil { return Extensions{}, err } case e.Id.Equal(OIDBuildConfigURI): - if err := parseDERString(e.Value, &out.BuildConfigURI); err != nil { + if err := ParseDERString(e.Value, &out.BuildConfigURI); err != nil { return Extensions{}, err } case e.Id.Equal(OIDBuildConfigDigest): - if err := parseDERString(e.Value, &out.BuildConfigDigest); err != nil { + if err := ParseDERString(e.Value, &out.BuildConfigDigest); err != nil { return Extensions{}, err } case e.Id.Equal(OIDBuildTrigger): - if err := parseDERString(e.Value, &out.BuildTrigger); err != nil { + if err := ParseDERString(e.Value, &out.BuildTrigger); err != nil { return Extensions{}, err } case e.Id.Equal(OIDRunInvocationURI): - if err := parseDERString(e.Value, &out.RunInvocationURI); err != nil { + if err := ParseDERString(e.Value, &out.RunInvocationURI); err != nil { return Extensions{}, err } } @@ -407,9 +407,9 @@ func 
parseExtensions(ext []pkix.Extension) (Extensions, error) { return out, nil } -// parseDERString decodes a DER-encoded string and puts the value in parsedVal. -// Rerturns an error if the unmarshalling fails or if there are trailing bytes in the encoding. -func parseDERString(val []byte, parsedVal *string) error { +// ParseDERString decodes a DER-encoded string and puts the value in parsedVal. +// Returns an error if the unmarshalling fails or if there are trailing bytes in the encoding. +func ParseDERString(val []byte, parsedVal *string) error { rest, err := asn1.Unmarshal(val, parsedVal) if err != nil { return fmt.Errorf("unexpected error unmarshalling DER-encoded string: %v", err) diff --git a/vendor/github.com/sigstore/sigstore/pkg/signature/payload/payload.go b/vendor/github.com/sigstore/sigstore/pkg/signature/payload/payload.go index 8b61aa15f..2764b4b31 100644 --- a/vendor/github.com/sigstore/sigstore/pkg/signature/payload/payload.go +++ b/vendor/github.com/sigstore/sigstore/pkg/signature/payload/payload.go @@ -52,16 +52,32 @@ type Image struct { // Cosign describes a container image signed using Cosign type Cosign struct { - Image name.Digest - Annotations map[string]interface{} + Image name.Digest + // ClaimedIdentity is what the signer claims the image to be; usually a registry.com/…/repo:tag, but can also use a digest instead. + // ALMOST ALL consumers MUST verify that ClaimedIdentity in the signature is correct given how user refers to the image; + // e.g. if the user asks to access a signed image example.com/repo/mysql:3.14, + // it is ALMOST ALWAYS necessary to validate that ClaimedIdentity = example.com/repo/mysql:3.14 + // + // Considerations: + // - The user might refer to an image using a digest (example.com/repo/mysql@sha256:…); in that case the registry/…/repo should still match + // - If the image is multi-arch, ClaimedIdentity usually refers to the top-level multi-arch image index also on the per-arch images + // (possibly even if ClaimedIdentity contains a digest!) 
+ // - Older versions of cosign generate signatures where ClaimedIdentity only contains a registry/…/repo ; signature consumers should allow users + // to determine whether such images should be accepted (and, long-term, the default SHOULD be to reject them) + ClaimedIdentity string + Annotations map[string]interface{} } // SimpleContainerImage returns information about a container image in the github.com/containers/image/signature format func (p Cosign) SimpleContainerImage() SimpleContainerImage { + dockerReference := p.Image.Repository.Name() + if p.ClaimedIdentity != "" { + dockerReference = p.ClaimedIdentity + } return SimpleContainerImage{ Critical: Critical{ Identity: Identity{ - DockerReference: p.Image.Repository.Name(), + DockerReference: dockerReference, }, Image: Image{ DockerManifestDigest: p.Image.DigestStr(), @@ -98,6 +114,7 @@ func (p *Cosign) UnmarshalJSON(data []byte) error { return fmt.Errorf("could not parse image digest string %q: %w", digestStr, err) } p.Image = digest + p.ClaimedIdentity = simple.Critical.Identity.DockerReference p.Annotations = simple.Optional return nil } diff --git a/vendor/github.com/sylabs/sif/v2/pkg/sif/create.go b/vendor/github.com/sylabs/sif/v2/pkg/sif/create.go index 2c15eeab7..0e1f9103d 100644 --- a/vendor/github.com/sylabs/sif/v2/pkg/sif/create.go +++ b/vendor/github.com/sylabs/sif/v2/pkg/sif/create.go @@ -622,60 +622,52 @@ func (f *FileImage) SetPrimPart(id uint32, opts ...SetOpt) error { return fmt.Errorf("%w", errNotPartition) } - fs, pt, arch, err := descr.getPartitionMetadata() - if err != nil { + var p partition + if err := descr.getExtra(binaryUnmarshaler{&p}); err != nil { return fmt.Errorf("%w", err) } // if already primary system partition, nothing to do - if pt == PartPrimSys { + if p.Parttype == PartPrimSys { return nil } - if pt != PartSystem { + if p.Parttype != PartSystem { return fmt.Errorf("%w", errNotSystem) } - olddescr, err := f.getDescriptor(WithPartitionType(PartPrimSys)) - if err != nil && !errors.Is(err, ErrObjectNotFound) { - return fmt.Errorf("%w", err) - } - extra := partition{ - Fstype: fs, - Parttype: PartPrimSys, - } - copy(extra.Arch[:], arch) - - if err := descr.setExtra(extra); err != nil { - return fmt.Errorf("%w", err) - } - - descr.ModifiedAt = so.t.Unix() - - if olddescr != nil { - oldfs, _, oldarch, err := olddescr.getPartitionMetadata() - if err != nil { + // If there is currently a primary system partition, update it. + if d, err := f.getDescriptor(WithPartitionType(PartPrimSys)); err == nil { + var p partition + if err := d.getExtra(binaryUnmarshaler{&p}); err != nil { return fmt.Errorf("%w", err) } - oldextra := partition{ - Fstype: oldfs, - Parttype: PartSystem, - Arch: getSIFArch(oldarch), - } + p.Parttype = PartSystem - if err := olddescr.setExtra(oldextra); err != nil { + if err := d.setExtra(p); err != nil { return fmt.Errorf("%w", err) } - olddescr.ModifiedAt = so.t.Unix() + d.ModifiedAt = so.t.Unix() + } else if !errors.Is(err, ErrObjectNotFound) { + return fmt.Errorf("%w", err) } + // Update the descriptor of the new primary system partition. 
+ p.Parttype = PartPrimSys + + if err := descr.setExtra(p); err != nil { + return fmt.Errorf("%w", err) + } + + descr.ModifiedAt = so.t.Unix() + if err := f.writeDescriptors(); err != nil { return fmt.Errorf("%w", err) } - f.h.Arch = getSIFArch(arch) + f.h.Arch = p.Arch f.h.ModifiedAt = so.t.Unix() if err := f.writeHeader(); err != nil { diff --git a/vendor/github.com/vbauerster/mpb/v8/README.md b/vendor/github.com/vbauerster/mpb/v8/README.md index d5f787276..09825ca08 100644 --- a/vendor/github.com/vbauerster/mpb/v8/README.md +++ b/vendor/github.com/vbauerster/mpb/v8/README.md @@ -82,8 +82,8 @@ func main() { mpb.AppendDecorators( // replace ETA decorator with "done" message, OnComplete event decor.OnComplete( - // ETA decorator with ewma age of 60 - decor.EwmaETA(decor.ET_STYLE_GO, 60, decor.WCSyncWidth), "done", + // ETA decorator with ewma age of 30 + decor.EwmaETA(decor.ET_STYLE_GO, 30, decor.WCSyncWidth), "done", ), ), ) diff --git a/vendor/github.com/vbauerster/mpb/v8/decor/counters.go b/vendor/github.com/vbauerster/mpb/v8/decor/counters.go index 331c3df67..042027578 100644 --- a/vendor/github.com/vbauerster/mpb/v8/decor/counters.go +++ b/vendor/github.com/vbauerster/mpb/v8/decor/counters.go @@ -2,13 +2,6 @@ package decor import ( "fmt" - "strings" -) - -const ( - _ = iota - UnitKiB - UnitKB ) // CountersNoUnit is a wrapper around Counters with no unit param. @@ -17,54 +10,60 @@ func CountersNoUnit(pairFmt string, wcc ...WC) Decorator { } // CountersKibiByte is a wrapper around Counters with predefined unit -// UnitKiB (bytes/1024). +// as SizeB1024(0). func CountersKibiByte(pairFmt string, wcc ...WC) Decorator { - return Counters(UnitKiB, pairFmt, wcc...) + return Counters(SizeB1024(0), pairFmt, wcc...) } // CountersKiloByte is a wrapper around Counters with predefined unit -// UnitKB (bytes/1000). +// as SizeB1000(0). func CountersKiloByte(pairFmt string, wcc ...WC) Decorator { - return Counters(UnitKB, pairFmt, wcc...) + return Counters(SizeB1000(0), pairFmt, wcc...) } // Counters decorator with dynamic unit measure adjustment. 
// -// `unit` one of [0|UnitKiB|UnitKB] zero for no unit +// `unit` one of [0|SizeB1024(0)|SizeB1000(0)] // -// `pairFmt` printf compatible verbs for current and total pair +// `pairFmt` printf compatible verbs for current and total // // `wcc` optional WC config // -// pairFmt example if unit=UnitKB: +// pairFmt example if unit=SizeB1000(0): // -// pairFmt="%.1f / %.1f" output: "1.0MB / 12.0MB" -// pairFmt="% .1f / % .1f" output: "1.0 MB / 12.0 MB" // pairFmt="%d / %d" output: "1MB / 12MB" // pairFmt="% d / % d" output: "1 MB / 12 MB" -func Counters(unit int, pairFmt string, wcc ...WC) Decorator { - producer := func(unit int, pairFmt string) DecorFunc { - if pairFmt == "" { - pairFmt = "%d / %d" - } else if strings.Count(pairFmt, "%") != 2 { - panic("expected pairFmt with exactly 2 verbs") - } - switch unit { - case UnitKiB: +// pairFmt="%.1f / %.1f" output: "1.0MB / 12.0MB" +// pairFmt="% .1f / % .1f" output: "1.0 MB / 12.0 MB" +// pairFmt="%f / %f" output: "1.000000MB / 12.000000MB" +// pairFmt="% f / % f" output: "1.000000 MB / 12.000000 MB" +func Counters(unit interface{}, pairFmt string, wcc ...WC) Decorator { + producer := func() DecorFunc { + switch unit.(type) { + case SizeB1024: + if pairFmt == "" { + pairFmt = "% d / % d" + } return func(s Statistics) string { return fmt.Sprintf(pairFmt, SizeB1024(s.Current), SizeB1024(s.Total)) } - case UnitKB: + case SizeB1000: + if pairFmt == "" { + pairFmt = "% d / % d" + } return func(s Statistics) string { return fmt.Sprintf(pairFmt, SizeB1000(s.Current), SizeB1000(s.Total)) } default: + if pairFmt == "" { + pairFmt = "%d / %d" + } return func(s Statistics) string { return fmt.Sprintf(pairFmt, s.Current, s.Total) } } } - return Any(producer(unit, pairFmt), wcc...) + return Any(producer(), wcc...) } // TotalNoUnit is a wrapper around Total with no unit param. @@ -73,55 +72,60 @@ func TotalNoUnit(format string, wcc ...WC) Decorator { } // TotalKibiByte is a wrapper around Total with predefined unit -// UnitKiB (bytes/1024). +// as SizeB1024(0). func TotalKibiByte(format string, wcc ...WC) Decorator { - return Total(UnitKiB, format, wcc...) + return Total(SizeB1024(0), format, wcc...) } // TotalKiloByte is a wrapper around Total with predefined unit -// UnitKB (bytes/1000). +// as SizeB1000(0). func TotalKiloByte(format string, wcc ...WC) Decorator { - return Total(UnitKB, format, wcc...) + return Total(SizeB1000(0), format, wcc...) } // Total decorator with dynamic unit measure adjustment. 
// -// `unit` one of [0|UnitKiB|UnitKB] zero for no unit +// `unit` one of [0|SizeB1024(0)|SizeB1000(0)] // // `format` printf compatible verb for Total // // `wcc` optional WC config // -// format example if unit=UnitKiB: +// format example if unit=SizeB1024(0): // -// format="%.1f" output: "12.0MiB" -// format="% .1f" output: "12.0 MiB" // format="%d" output: "12MiB" // format="% d" output: "12 MiB" -func Total(unit int, format string, wcc ...WC) Decorator { - producer := func(unit int, format string) DecorFunc { - if format == "" { - format = "%d" - } else if strings.Count(format, "%") != 1 { - panic("expected format with exactly 1 verb") - } - - switch unit { - case UnitKiB: +// format="%.1f" output: "12.0MiB" +// format="% .1f" output: "12.0 MiB" +// format="%f" output: "12.000000MiB" +// format="% f" output: "12.000000 MiB" +func Total(unit interface{}, format string, wcc ...WC) Decorator { + producer := func() DecorFunc { + switch unit.(type) { + case SizeB1024: + if format == "" { + format = "% d" + } return func(s Statistics) string { return fmt.Sprintf(format, SizeB1024(s.Total)) } - case UnitKB: + case SizeB1000: + if format == "" { + format = "% d" + } return func(s Statistics) string { return fmt.Sprintf(format, SizeB1000(s.Total)) } default: + if format == "" { + format = "%d" + } return func(s Statistics) string { return fmt.Sprintf(format, s.Total) } } } - return Any(producer(unit, format), wcc...) + return Any(producer(), wcc...) } // CurrentNoUnit is a wrapper around Current with no unit param. @@ -130,55 +134,60 @@ func CurrentNoUnit(format string, wcc ...WC) Decorator { } // CurrentKibiByte is a wrapper around Current with predefined unit -// UnitKiB (bytes/1024). +// as SizeB1024(0). func CurrentKibiByte(format string, wcc ...WC) Decorator { - return Current(UnitKiB, format, wcc...) + return Current(SizeB1024(0), format, wcc...) } // CurrentKiloByte is a wrapper around Current with predefined unit -// UnitKB (bytes/1000). +// as SizeB1000(0). func CurrentKiloByte(format string, wcc ...WC) Decorator { - return Current(UnitKB, format, wcc...) + return Current(SizeB1000(0), format, wcc...) } // Current decorator with dynamic unit measure adjustment. 
// -// `unit` one of [0|UnitKiB|UnitKB] zero for no unit +// `unit` one of [0|SizeB1024(0)|SizeB1000(0)] // // `format` printf compatible verb for Current // // `wcc` optional WC config // -// format example if unit=UnitKiB: +// format example if unit=SizeB1024(0): // -// format="%.1f" output: "12.0MiB" -// format="% .1f" output: "12.0 MiB" // format="%d" output: "12MiB" // format="% d" output: "12 MiB" -func Current(unit int, format string, wcc ...WC) Decorator { - producer := func(unit int, format string) DecorFunc { - if format == "" { - format = "%d" - } else if strings.Count(format, "%") != 1 { - panic("expected format with exactly 1 verb") - } - - switch unit { - case UnitKiB: +// format="%.1f" output: "12.0MiB" +// format="% .1f" output: "12.0 MiB" +// format="%f" output: "12.000000MiB" +// format="% f" output: "12.000000 MiB" +func Current(unit interface{}, format string, wcc ...WC) Decorator { + producer := func() DecorFunc { + switch unit.(type) { + case SizeB1024: + if format == "" { + format = "% d" + } return func(s Statistics) string { return fmt.Sprintf(format, SizeB1024(s.Current)) } - case UnitKB: + case SizeB1000: + if format == "" { + format = "% d" + } return func(s Statistics) string { return fmt.Sprintf(format, SizeB1000(s.Current)) } default: + if format == "" { + format = "%d" + } return func(s Statistics) string { return fmt.Sprintf(format, s.Current) } } } - return Any(producer(unit, format), wcc...) + return Any(producer(), wcc...) } // InvertedCurrentNoUnit is a wrapper around InvertedCurrent with no unit param. @@ -187,53 +196,58 @@ func InvertedCurrentNoUnit(format string, wcc ...WC) Decorator { } // InvertedCurrentKibiByte is a wrapper around InvertedCurrent with predefined unit -// UnitKiB (bytes/1024). +// as SizeB1024(0). func InvertedCurrentKibiByte(format string, wcc ...WC) Decorator { - return InvertedCurrent(UnitKiB, format, wcc...) + return InvertedCurrent(SizeB1024(0), format, wcc...) } // InvertedCurrentKiloByte is a wrapper around InvertedCurrent with predefined unit -// UnitKB (bytes/1000). +// as SizeB1000(0). func InvertedCurrentKiloByte(format string, wcc ...WC) Decorator { - return InvertedCurrent(UnitKB, format, wcc...) + return InvertedCurrent(SizeB1000(0), format, wcc...) } // InvertedCurrent decorator with dynamic unit measure adjustment. 
// -// `unit` one of [0|UnitKiB|UnitKB] zero for no unit +// `unit` one of [0|SizeB1024(0)|SizeB1000(0)] // // `format` printf compatible verb for InvertedCurrent // // `wcc` optional WC config // -// format example if unit=UnitKiB: +// format example if unit=SizeB1024(0): // -// format="%.1f" output: "12.0MiB" -// format="% .1f" output: "12.0 MiB" // format="%d" output: "12MiB" // format="% d" output: "12 MiB" -func InvertedCurrent(unit int, format string, wcc ...WC) Decorator { - producer := func(unit int, format string) DecorFunc { - if format == "" { - format = "%d" - } else if strings.Count(format, "%") != 1 { - panic("expected format with exactly 1 verb") - } - - switch unit { - case UnitKiB: +// format="%.1f" output: "12.0MiB" +// format="% .1f" output: "12.0 MiB" +// format="%f" output: "12.000000MiB" +// format="% f" output: "12.000000 MiB" +func InvertedCurrent(unit interface{}, format string, wcc ...WC) Decorator { + producer := func() DecorFunc { + switch unit.(type) { + case SizeB1024: + if format == "" { + format = "% d" + } return func(s Statistics) string { return fmt.Sprintf(format, SizeB1024(s.Total-s.Current)) } - case UnitKB: + case SizeB1000: + if format == "" { + format = "% d" + } return func(s Statistics) string { return fmt.Sprintf(format, SizeB1000(s.Total-s.Current)) } default: + if format == "" { + format = "%d" + } return func(s Statistics) string { return fmt.Sprintf(format, s.Total-s.Current) } } } - return Any(producer(unit, format), wcc...) + return Any(producer(), wcc...) } diff --git a/vendor/github.com/vbauerster/mpb/v8/decor/eta.go b/vendor/github.com/vbauerster/mpb/v8/decor/eta.go index 3594e0185..e33631dab 100644 --- a/vendor/github.com/vbauerster/mpb/v8/decor/eta.go +++ b/vendor/github.com/vbauerster/mpb/v8/decor/eta.go @@ -199,8 +199,7 @@ func chooseTimeProducer(style TimeStyle) func(time.Duration) string { } default: return func(remaining time.Duration) string { - // strip off nanoseconds - return ((remaining / time.Second) * time.Second).String() + return remaining.Truncate(time.Second).String() } } } diff --git a/vendor/github.com/vbauerster/mpb/v8/decor/on_abort.go b/vendor/github.com/vbauerster/mpb/v8/decor/on_abort.go index 862ae33d2..65a8d9dae 100644 --- a/vendor/github.com/vbauerster/mpb/v8/decor/on_abort.go +++ b/vendor/github.com/vbauerster/mpb/v8/decor/on_abort.go @@ -34,8 +34,7 @@ type onAbortWrapper struct { func (d *onAbortWrapper) Decor(s Statistics) string { if s.Aborted { - wc := d.GetConf() - return wc.FormatMsg(d.msg) + return d.GetConf().FormatMsg(d.msg) } return d.Decorator.Decor(s) } diff --git a/vendor/github.com/vbauerster/mpb/v8/decor/on_complete.go b/vendor/github.com/vbauerster/mpb/v8/decor/on_complete.go index 6ee926844..0a3897b81 100644 --- a/vendor/github.com/vbauerster/mpb/v8/decor/on_complete.go +++ b/vendor/github.com/vbauerster/mpb/v8/decor/on_complete.go @@ -33,8 +33,7 @@ type onCompleteWrapper struct { func (d *onCompleteWrapper) Decor(s Statistics) string { if s.Completed { - wc := d.GetConf() - return wc.FormatMsg(d.msg) + return d.GetConf().FormatMsg(d.msg) } return d.Decorator.Decor(s) } diff --git a/vendor/github.com/vbauerster/mpb/v8/decor/percentage.go b/vendor/github.com/vbauerster/mpb/v8/decor/percentage.go index 1d3b3a9e0..9709c196c 100644 --- a/vendor/github.com/vbauerster/mpb/v8/decor/percentage.go +++ b/vendor/github.com/vbauerster/mpb/v8/decor/percentage.go @@ -7,24 +7,25 @@ import ( "github.com/vbauerster/mpb/v8/internal" ) +var _ fmt.Formatter = percentageType(0) + type percentageType float64 func 
(s percentageType) Format(st fmt.State, verb rune) { - var prec int + prec := -1 switch verb { - case 'd': - case 's': - prec = -1 - default: + case 'f', 'e', 'E': + prec = 6 // default prec of fmt.Printf("%f|%e|%E") + fallthrough + case 'b', 'g', 'G', 'x', 'X': if p, ok := st.Precision(); ok { prec = p - } else { - prec = 6 } + default: + verb, prec = 'f', 0 } - p := bytesPool.Get().(*[]byte) - b := strconv.AppendFloat(*p, float64(s), 'f', prec, 64) + b := strconv.AppendFloat(make([]byte, 0, 16), float64(s), byte(verb), prec, 64) if st.Flag(' ') { b = append(b, ' ', '%') } else { @@ -34,7 +35,6 @@ func (s percentageType) Format(st fmt.State, verb rune) { if err != nil { panic(err) } - bytesPool.Put(p) } // Percentage returns percentage decorator. It's a wrapper of NewPercentage. @@ -44,12 +44,18 @@ func Percentage(wcc ...WC) Decorator { // NewPercentage percentage decorator with custom format string. // +// `format` printf compatible verb +// +// `wcc` optional WC config +// // format examples: // -// format="%.1f" output: "1.0%" -// format="% .1f" output: "1.0 %" // format="%d" output: "1%" // format="% d" output: "1 %" +// format="%.1f" output: "1.0%" +// format="% .1f" output: "1.0 %" +// format="%f" output: "1.000000%" +// format="% f" output: "1.000000 %" func NewPercentage(format string, wcc ...WC) Decorator { if format == "" { format = "% d" diff --git a/vendor/github.com/vbauerster/mpb/v8/decor/pool.go b/vendor/github.com/vbauerster/mpb/v8/decor/pool.go deleted file mode 100644 index cefa9bfac..000000000 --- a/vendor/github.com/vbauerster/mpb/v8/decor/pool.go +++ /dev/null @@ -1,10 +0,0 @@ -package decor - -import "sync" - -var bytesPool = sync.Pool{ - New: func() interface{} { - b := make([]byte, 0, 32) - return &b - }, -} diff --git a/vendor/github.com/vbauerster/mpb/v8/decor/size_type.go b/vendor/github.com/vbauerster/mpb/v8/decor/size_type.go index 4df27095f..d9950b61c 100644 --- a/vendor/github.com/vbauerster/mpb/v8/decor/size_type.go +++ b/vendor/github.com/vbauerster/mpb/v8/decor/size_type.go @@ -8,6 +8,13 @@ import ( //go:generate stringer -type=SizeB1024 -trimprefix=_i //go:generate stringer -type=SizeB1000 -trimprefix=_ +var ( + _ fmt.Formatter = SizeB1024(0) + _ fmt.Stringer = SizeB1024(0) + _ fmt.Formatter = SizeB1000(0) + _ fmt.Stringer = SizeB1000(0) +) + const ( _ib SizeB1024 = iota + 1 _iKiB SizeB1024 = 1 << (iota * 10) @@ -22,17 +29,17 @@ const ( type SizeB1024 int64 func (self SizeB1024) Format(st fmt.State, verb rune) { - var prec int + prec := -1 switch verb { - case 'd': - case 's': - prec = -1 - default: + case 'f', 'e', 'E': + prec = 6 // default prec of fmt.Printf("%f|%e|%E") + fallthrough + case 'b', 'g', 'G', 'x', 'X': if p, ok := st.Precision(); ok { prec = p - } else { - prec = 6 } + default: + verb, prec = 'f', 0 } var unit SizeB1024 @@ -49,8 +56,7 @@ func (self SizeB1024) Format(st fmt.State, verb rune) { unit = _iTiB } - p := bytesPool.Get().(*[]byte) - b := strconv.AppendFloat(*p, float64(self)/float64(unit), 'f', prec, 64) + b := strconv.AppendFloat(make([]byte, 0, 24), float64(self)/float64(unit), byte(verb), prec, 64) if st.Flag(' ') { b = append(b, ' ') } @@ -59,7 +65,6 @@ func (self SizeB1024) Format(st fmt.State, verb rune) { if err != nil { panic(err) } - bytesPool.Put(p) } const ( @@ -76,17 +81,17 @@ const ( type SizeB1000 int64 func (self SizeB1000) Format(st fmt.State, verb rune) { - var prec int + prec := -1 switch verb { - case 'd': - case 's': - prec = -1 - default: + case 'f', 'e', 'E': + prec = 6 // default prec of 
fmt.Printf("%f|%e|%E") + fallthrough + case 'b', 'g', 'G', 'x', 'X': if p, ok := st.Precision(); ok { prec = p - } else { - prec = 6 } + default: + verb, prec = 'f', 0 } var unit SizeB1000 @@ -103,8 +108,7 @@ func (self SizeB1000) Format(st fmt.State, verb rune) { unit = _TB } - p := bytesPool.Get().(*[]byte) - b := strconv.AppendFloat(*p, float64(self)/float64(unit), 'f', prec, 64) + b := strconv.AppendFloat(make([]byte, 0, 24), float64(self)/float64(unit), byte(verb), prec, 64) if st.Flag(' ') { b = append(b, ' ') } @@ -113,5 +117,4 @@ func (self SizeB1000) Format(st fmt.State, verb rune) { if err != nil { panic(err) } - bytesPool.Put(p) } diff --git a/vendor/github.com/vbauerster/mpb/v8/decor/speed.go b/vendor/github.com/vbauerster/mpb/v8/decor/speed.go index dd0ad7001..d4f644704 100644 --- a/vendor/github.com/vbauerster/mpb/v8/decor/speed.go +++ b/vendor/github.com/vbauerster/mpb/v8/decor/speed.go @@ -39,7 +39,7 @@ func (self speedFormatter) Format(st fmt.State, verb rune) { // EwmaSpeed exponential-weighted-moving-average based speed decorator. // For this decorator to work correctly you have to measure each iteration's // duration and pass it to one of the (*Bar).EwmaIncr... family methods. -func EwmaSpeed(unit int, format string, age float64, wcc ...WC) Decorator { +func EwmaSpeed(unit interface{}, format string, age float64, wcc ...WC) Decorator { var average ewma.MovingAverage if age == 0 { average = ewma.NewMovingAverage() @@ -52,7 +52,7 @@ func EwmaSpeed(unit int, format string, age float64, wcc ...WC) Decorator { // MovingAverageSpeed decorator relies on MovingAverage implementation // to calculate its average. // -// `unit` one of [0|UnitKiB|UnitKB] zero for no unit +// `unit` one of [0|SizeB1024(0)|SizeB1000(0)] // // `format` printf compatible verb for value, like "%f" or "%d" // @@ -62,14 +62,11 @@ func EwmaSpeed(unit int, format string, age float64, wcc ...WC) Decorator { // // format examples: // -// unit=UnitKiB, format="%.1f" output: "1.0MiB/s" -// unit=UnitKiB, format="% .1f" output: "1.0 MiB/s" -// unit=UnitKB, format="%.1f" output: "1.0MB/s" -// unit=UnitKB, format="% .1f" output: "1.0 MB/s" -func MovingAverageSpeed(unit int, format string, average ewma.MovingAverage, wcc ...WC) Decorator { - if format == "" { - format = "%.0f" - } +// unit=SizeB1024(0), format="%.1f" output: "1.0MiB/s" +// unit=SizeB1024(0), format="% .1f" output: "1.0 MiB/s" +// unit=SizeB1000(0), format="%.1f" output: "1.0MB/s" +// unit=SizeB1000(0), format="% .1f" output: "1.0 MB/s" +func MovingAverageSpeed(unit interface{}, format string, average ewma.MovingAverage, wcc ...WC) Decorator { d := &movingAverageSpeed{ WC: initWC(wcc...), average: average, @@ -106,14 +103,14 @@ func (d *movingAverageSpeed) EwmaUpdate(n int64, dur time.Duration) { // AverageSpeed decorator with dynamic unit measure adjustment. It's // a wrapper of NewAverageSpeed. -func AverageSpeed(unit int, format string, wcc ...WC) Decorator { +func AverageSpeed(unit interface{}, format string, wcc ...WC) Decorator { return NewAverageSpeed(unit, format, time.Now(), wcc...) } // NewAverageSpeed decorator with dynamic unit measure adjustment and // user provided start time. 
// -// `unit` one of [0|UnitKiB|UnitKB] zero for no unit +// `unit` one of [0|SizeB1024(0)|SizeB1000(0)] // // `format` printf compatible verb for value, like "%f" or "%d" // @@ -123,14 +120,11 @@ func AverageSpeed(unit int, format string, wcc ...WC) Decorator { // // format examples: // -// unit=UnitKiB, format="%.1f" output: "1.0MiB/s" -// unit=UnitKiB, format="% .1f" output: "1.0 MiB/s" -// unit=UnitKB, format="%.1f" output: "1.0MB/s" -// unit=UnitKB, format="% .1f" output: "1.0 MB/s" -func NewAverageSpeed(unit int, format string, startTime time.Time, wcc ...WC) Decorator { - if format == "" { - format = "%.0f" - } +// unit=SizeB1024(0), format="%.1f" output: "1.0MiB/s" +// unit=SizeB1024(0), format="% .1f" output: "1.0 MiB/s" +// unit=SizeB1000(0), format="%.1f" output: "1.0MB/s" +// unit=SizeB1000(0), format="% .1f" output: "1.0 MB/s" +func NewAverageSpeed(unit interface{}, format string, startTime time.Time, wcc ...WC) Decorator { d := &averageSpeed{ WC: initWC(wcc...), startTime: startTime, @@ -151,7 +145,6 @@ func (d *averageSpeed) Decor(s Statistics) string { speed := float64(s.Current) / float64(time.Since(d.startTime)) d.msg = d.producer(speed * 1e9) } - return d.FormatMsg(d.msg) } @@ -159,17 +152,26 @@ func (d *averageSpeed) AverageAdjust(startTime time.Time) { d.startTime = startTime } -func chooseSpeedProducer(unit int, format string) func(float64) string { - switch unit { - case UnitKiB: +func chooseSpeedProducer(unit interface{}, format string) func(float64) string { + switch unit.(type) { + case SizeB1024: + if format == "" { + format = "% d" + } return func(speed float64) string { return fmt.Sprintf(format, FmtAsSpeed(SizeB1024(math.Round(speed)))) } - case UnitKB: + case SizeB1000: + if format == "" { + format = "% d" + } return func(speed float64) string { return fmt.Sprintf(format, FmtAsSpeed(SizeB1000(math.Round(speed)))) } default: + if format == "" { + format = "%f" + } return func(speed float64) string { return fmt.Sprintf(format, speed) } diff --git a/vendor/github.com/vbauerster/mpb/v8/heap_manager.go b/vendor/github.com/vbauerster/mpb/v8/heap_manager.go index 678dd7c9f..1b2364f77 100644 --- a/vendor/github.com/vbauerster/mpb/v8/heap_manager.go +++ b/vendor/github.com/vbauerster/mpb/v8/heap_manager.go @@ -46,6 +46,7 @@ func (m heapManager) run() { var sync bool for req := range m { + next: switch req.cmd { case h_push: data := req.data.(pushData) @@ -78,7 +79,8 @@ func (m heapManager) run() { select { case data.iter <- b: case <-data.drop: - break + close(data.iter) + break next } } close(data.iter) @@ -88,7 +90,8 @@ func (m heapManager) run() { select { case data.iter <- heap.Pop(&bHeap).(*Bar): case <-data.drop: - break + close(data.iter) + break next } } close(data.iter) diff --git a/vendor/github.com/vbauerster/mpb/v8/progress.go b/vendor/github.com/vbauerster/mpb/v8/progress.go index 9bb557ee4..cc4e3e102 100644 --- a/vendor/github.com/vbauerster/mpb/v8/progress.go +++ b/vendor/github.com/vbauerster/mpb/v8/progress.go @@ -19,7 +19,7 @@ const ( ) // DoneError represents an error when `*mpb.Progress` is done but its functionality is requested. -var DoneError = fmt.Errorf("%T instance can't be reused after it's done!", (*Progress)(nil)) +var DoneError = fmt.Errorf("%T instance can't be reused after it's done", (*Progress)(nil)) // Progress represents a container that renders one or more progress bars. 
type Progress struct { @@ -351,7 +351,7 @@ func (s *pState) render(cw *cwriter.Writer) (err error) { } func (s *pState) flush(cw *cwriter.Writer, height int) error { - wg := new(sync.WaitGroup) + var wg sync.WaitGroup defer wg.Wait() // waiting for all s.hm.push to complete var popCount int diff --git a/vendor/golang.org/x/exp/slices/slices.go b/vendor/golang.org/x/exp/slices/slices.go index cff0cd49e..2540bd682 100644 --- a/vendor/golang.org/x/exp/slices/slices.go +++ b/vendor/golang.org/x/exp/slices/slices.go @@ -104,8 +104,8 @@ func CompareFunc[E1, E2 any](s1 []E1, s2 []E2, cmp func(E1, E2) int) int { // Index returns the index of the first occurrence of v in s, // or -1 if not present. func Index[E comparable](s []E, v E) int { - for i, vs := range s { - if v == vs { + for i := range s { + if v == s[i] { return i } } @@ -115,8 +115,8 @@ func Index[E comparable](s []E, v E) int { // IndexFunc returns the first index i satisfying f(s[i]), // or -1 if none do. func IndexFunc[E any](s []E, f func(E) bool) int { - for i, v := range s { - if f(v) { + for i := range s { + if f(s[i]) { return i } } @@ -207,12 +207,12 @@ func Compact[S ~[]E, E comparable](s S) S { return s } i := 1 - last := s[0] - for _, v := range s[1:] { - if v != last { - s[i] = v + for k := 1; k < len(s); k++ { + if s[k] != s[k-1] { + if i != k { + s[i] = s[k] + } i++ - last = v } } return s[:i] @@ -224,12 +224,12 @@ func CompactFunc[S ~[]E, E any](s S, eq func(E, E) bool) S { return s } i := 1 - last := s[0] - for _, v := range s[1:] { - if !eq(v, last) { - s[i] = v + for k := 1; k < len(s); k++ { + if !eq(s[k], s[k-1]) { + if i != k { + s[i] = s[k] + } i++ - last = v } } return s[:i] diff --git a/vendor/golang.org/x/exp/slices/sort.go b/vendor/golang.org/x/exp/slices/sort.go index f14f40da7..231b6448a 100644 --- a/vendor/golang.org/x/exp/slices/sort.go +++ b/vendor/golang.org/x/exp/slices/sort.go @@ -81,10 +81,12 @@ func BinarySearch[E constraints.Ordered](x []E, target E) (int, bool) { } // BinarySearchFunc works like BinarySearch, but uses a custom comparison -// function. The slice must be sorted in increasing order, where "increasing" is -// defined by cmp. cmp(a, b) is expected to return an integer comparing the two -// parameters: 0 if a == b, a negative number if a < b and a positive number if -// a > b. +// function. The slice must be sorted in increasing order, where "increasing" +// is defined by cmp. cmp should return 0 if the slice element matches +// the target, a negative number if the slice element precedes the target, +// or a positive number if the slice element follows the target. +// cmp must implement the same ordering as the slice, such that if +// cmp(a, t) < 0 and cmp(b, t) >= 0, then a must precede b in the slice. func BinarySearchFunc[E, T any](x []E, target T, cmp func(E, T) int) (int, bool) { n := len(x) // Define cmp(x[-1], target) < 0 and cmp(x[n], target) >= 0 . diff --git a/vendor/golang.org/x/net/http2/server.go b/vendor/golang.org/x/net/http2/server.go index cd057f398..033b6e6db 100644 --- a/vendor/golang.org/x/net/http2/server.go +++ b/vendor/golang.org/x/net/http2/server.go @@ -441,7 +441,7 @@ func (s *Server) ServeConn(c net.Conn, opts *ServeConnOpts) { if s.NewWriteScheduler != nil { sc.writeSched = s.NewWriteScheduler() } else { - sc.writeSched = NewPriorityWriteScheduler(nil) + sc.writeSched = newRoundRobinWriteScheduler() } // These start at the RFC-specified defaults. 
If there is a higher @@ -2429,7 +2429,7 @@ type requestBody struct { conn *serverConn closeOnce sync.Once // for use by Close only sawEOF bool // for use by Read only - pipe *pipe // non-nil if we have a HTTP entity message body + pipe *pipe // non-nil if we have an HTTP entity message body needsContinue bool // need to send a 100-continue } @@ -2569,7 +2569,8 @@ func (rws *responseWriterState) writeChunk(p []byte) (n int, err error) { clen = "" } } - if clen == "" && rws.handlerDone && bodyAllowedForStatus(rws.status) && (len(p) > 0 || !isHeadResp) { + _, hasContentLength := rws.snapHeader["Content-Length"] + if !hasContentLength && clen == "" && rws.handlerDone && bodyAllowedForStatus(rws.status) && (len(p) > 0 || !isHeadResp) { clen = strconv.Itoa(len(p)) } _, hasContentType := rws.snapHeader["Content-Type"] @@ -2774,7 +2775,7 @@ func (w *responseWriter) FlushError() error { err = rws.bw.Flush() } else { // The bufio.Writer won't call chunkWriter.Write - // (writeChunk with zero bytes, so we have to do it + // (writeChunk with zero bytes), so we have to do it // ourselves to force the HTTP response header and/or // final DATA frame (with END_STREAM) to be sent. _, err = chunkWriter{rws}.Write(nil) diff --git a/vendor/golang.org/x/net/http2/transport.go b/vendor/golang.org/x/net/http2/transport.go index ac90a2631..4f08ccba9 100644 --- a/vendor/golang.org/x/net/http2/transport.go +++ b/vendor/golang.org/x/net/http2/transport.go @@ -1268,8 +1268,8 @@ func (cc *ClientConn) RoundTrip(req *http.Request) (*http.Response, error) { cancelRequest := func(cs *clientStream, err error) error { cs.cc.mu.Lock() - defer cs.cc.mu.Unlock() cs.abortStreamLocked(err) + bodyClosed := cs.reqBodyClosed if cs.ID != 0 { // This request may have failed because of a problem with the connection, // or for some unrelated reason. (For example, the user might have canceled @@ -1284,6 +1284,23 @@ func (cc *ClientConn) RoundTrip(req *http.Request) (*http.Response, error) { // will not help. cs.cc.doNotReuse = true } + cs.cc.mu.Unlock() + // Wait for the request body to be closed. + // + // If nothing closed the body before now, abortStreamLocked + // will have started a goroutine to close it. + // + // Closing the body before returning avoids a race condition + // with net/http checking its readTrackingBody to see if the + // body was read from or closed. See golang/go#60041. + // + // The body is closed in a separate goroutine without the + // connection mutex held, but dropping the mutex before waiting + // will keep us from holding it indefinitely if the body + // close is slow for some reason. + if bodyClosed != nil { + <-bodyClosed + } return err } @@ -1899,7 +1916,7 @@ func (cc *ClientConn) encodeHeaders(req *http.Request, addGzipHeader bool, trail // 8.1.2.3 Request Pseudo-Header Fields // The :path pseudo-header field includes the path and query parts of the // target URI (the path-absolute production and optionally a '?' character - // followed by the query production (see Sections 3.3 and 3.4 of + // followed by the query production, see Sections 3.3 and 3.4 of // [RFC3986]). f(":authority", host) m := req.Method diff --git a/vendor/golang.org/x/net/http2/writesched.go b/vendor/golang.org/x/net/http2/writesched.go index c7cd00173..cc893adc2 100644 --- a/vendor/golang.org/x/net/http2/writesched.go +++ b/vendor/golang.org/x/net/http2/writesched.go @@ -184,7 +184,8 @@ func (wr *FrameWriteRequest) replyToWriter(err error) { // writeQueue is used by implementations of WriteScheduler. 
type writeQueue struct { - s []FrameWriteRequest + s []FrameWriteRequest + prev, next *writeQueue } func (q *writeQueue) empty() bool { return len(q.s) == 0 } diff --git a/vendor/golang.org/x/net/http2/writesched_roundrobin.go b/vendor/golang.org/x/net/http2/writesched_roundrobin.go new file mode 100644 index 000000000..54fe86322 --- /dev/null +++ b/vendor/golang.org/x/net/http2/writesched_roundrobin.go @@ -0,0 +1,119 @@ +// Copyright 2023 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package http2 + +import ( + "fmt" + "math" +) + +type roundRobinWriteScheduler struct { + // control contains control frames (SETTINGS, PING, etc.). + control writeQueue + + // streams maps stream ID to a queue. + streams map[uint32]*writeQueue + + // stream queues are stored in a circular linked list. + // head is the next stream to write, or nil if there are no streams open. + head *writeQueue + + // pool of empty queues for reuse. + queuePool writeQueuePool +} + +// newRoundRobinWriteScheduler constructs a new write scheduler. +// The round robin scheduler priorizes control frames +// like SETTINGS and PING over DATA frames. +// When there are no control frames to send, it performs a round-robin +// selection from the ready streams. +func newRoundRobinWriteScheduler() WriteScheduler { + ws := &roundRobinWriteScheduler{ + streams: make(map[uint32]*writeQueue), + } + return ws +} + +func (ws *roundRobinWriteScheduler) OpenStream(streamID uint32, options OpenStreamOptions) { + if ws.streams[streamID] != nil { + panic(fmt.Errorf("stream %d already opened", streamID)) + } + q := ws.queuePool.get() + ws.streams[streamID] = q + if ws.head == nil { + ws.head = q + q.next = q + q.prev = q + } else { + // Queues are stored in a ring. + // Insert the new stream before ws.head, putting it at the end of the list. + q.prev = ws.head.prev + q.next = ws.head + q.prev.next = q + q.next.prev = q + } +} + +func (ws *roundRobinWriteScheduler) CloseStream(streamID uint32) { + q := ws.streams[streamID] + if q == nil { + return + } + if q.next == q { + // This was the only open stream. + ws.head = nil + } else { + q.prev.next = q.next + q.next.prev = q.prev + if ws.head == q { + ws.head = q.next + } + } + delete(ws.streams, streamID) + ws.queuePool.put(q) +} + +func (ws *roundRobinWriteScheduler) AdjustStream(streamID uint32, priority PriorityParam) {} + +func (ws *roundRobinWriteScheduler) Push(wr FrameWriteRequest) { + if wr.isControl() { + ws.control.push(wr) + return + } + q := ws.streams[wr.StreamID()] + if q == nil { + // This is a closed stream. + // wr should not be a HEADERS or DATA frame. + // We push the request onto the control queue. + if wr.DataSize() > 0 { + panic("add DATA on non-open stream") + } + ws.control.push(wr) + return + } + q.push(wr) +} + +func (ws *roundRobinWriteScheduler) Pop() (FrameWriteRequest, bool) { + // Control and RST_STREAM frames first. 
+ if !ws.control.empty() { + return ws.control.shift(), true + } + if ws.head == nil { + return FrameWriteRequest{}, false + } + q := ws.head + for { + if wr, ok := q.consume(math.MaxInt32); ok { + ws.head = q.next + return wr, true + } + q = q.next + if q == ws.head { + break + } + } + return FrameWriteRequest{}, false +} diff --git a/vendor/golang.org/x/sync/errgroup/errgroup.go b/vendor/golang.org/x/sync/errgroup/errgroup.go index cbee7a4e2..b18efb743 100644 --- a/vendor/golang.org/x/sync/errgroup/errgroup.go +++ b/vendor/golang.org/x/sync/errgroup/errgroup.go @@ -20,7 +20,7 @@ type token struct{} // A zero Group is valid, has no limit on the number of active goroutines, // and does not cancel on error. type Group struct { - cancel func() + cancel func(error) wg sync.WaitGroup @@ -43,7 +43,7 @@ func (g *Group) done() { // returns a non-nil error or the first time Wait returns, whichever occurs // first. func WithContext(ctx context.Context) (*Group, context.Context) { - ctx, cancel := context.WithCancel(ctx) + ctx, cancel := withCancelCause(ctx) return &Group{cancel: cancel}, ctx } @@ -52,7 +52,7 @@ func WithContext(ctx context.Context) (*Group, context.Context) { func (g *Group) Wait() error { g.wg.Wait() if g.cancel != nil { - g.cancel() + g.cancel(g.err) } return g.err } @@ -76,7 +76,7 @@ func (g *Group) Go(f func() error) { g.errOnce.Do(func() { g.err = err if g.cancel != nil { - g.cancel() + g.cancel(g.err) } }) } @@ -105,7 +105,7 @@ func (g *Group) TryGo(f func() error) bool { g.errOnce.Do(func() { g.err = err if g.cancel != nil { - g.cancel() + g.cancel(g.err) } }) } diff --git a/vendor/golang.org/x/sync/errgroup/go120.go b/vendor/golang.org/x/sync/errgroup/go120.go new file mode 100644 index 000000000..7d419d376 --- /dev/null +++ b/vendor/golang.org/x/sync/errgroup/go120.go @@ -0,0 +1,14 @@ +// Copyright 2023 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build go1.20 +// +build go1.20 + +package errgroup + +import "context" + +func withCancelCause(parent context.Context) (context.Context, func(error)) { + return context.WithCancelCause(parent) +} diff --git a/vendor/golang.org/x/sync/errgroup/pre_go120.go b/vendor/golang.org/x/sync/errgroup/pre_go120.go new file mode 100644 index 000000000..1795c18ac --- /dev/null +++ b/vendor/golang.org/x/sync/errgroup/pre_go120.go @@ -0,0 +1,15 @@ +// Copyright 2023 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build !go1.20 +// +build !go1.20 + +package errgroup + +import "context" + +func withCancelCause(parent context.Context) (context.Context, func(error)) { + ctx, cancel := context.WithCancel(parent) + return ctx, func(error) { cancel() } +} diff --git a/vendor/golang.org/x/tools/go/gcexportdata/gcexportdata.go b/vendor/golang.org/x/tools/go/gcexportdata/gcexportdata.go index 165ede0f8..03543bd4b 100644 --- a/vendor/golang.org/x/tools/go/gcexportdata/gcexportdata.go +++ b/vendor/golang.org/x/tools/go/gcexportdata/gcexportdata.go @@ -128,15 +128,14 @@ func Read(in io.Reader, fset *token.FileSet, imports map[string]*types.Package, // (from "version"). Select appropriate importer. 
if len(data) > 0 { switch data[0] { - case 'i': - _, pkg, err := gcimporter.IImportData(fset, imports, data[1:], path) - return pkg, err + case 'v', 'c', 'd': // binary, till go1.10 + return nil, fmt.Errorf("binary (%c) import format is no longer supported", data[0]) - case 'v', 'c', 'd': - _, pkg, err := gcimporter.BImportData(fset, imports, data, path) + case 'i': // indexed, till go1.19 + _, pkg, err := gcimporter.IImportData(fset, imports, data[1:], path) return pkg, err - case 'u': + case 'u': // unified, from go1.20 _, pkg, err := gcimporter.UImportData(fset, imports, data[1:], path) return pkg, err diff --git a/vendor/golang.org/x/tools/go/packages/golist.go b/vendor/golang.org/x/tools/go/packages/golist.go index 6bb7168d2..e84f19dfa 100644 --- a/vendor/golang.org/x/tools/go/packages/golist.go +++ b/vendor/golang.org/x/tools/go/packages/golist.go @@ -625,7 +625,12 @@ func (state *golistState) createDriverResponse(words ...string) (*driverResponse } if pkg.PkgPath == "unsafe" { - pkg.GoFiles = nil // ignore fake unsafe.go file + pkg.CompiledGoFiles = nil // ignore fake unsafe.go file (#59929) + } else if len(pkg.CompiledGoFiles) == 0 { + // Work around for pre-go.1.11 versions of go list. + // TODO(matloob): they should be handled by the fallback. + // Can we delete this? + pkg.CompiledGoFiles = pkg.GoFiles } // Assume go list emits only absolute paths for Dir. @@ -663,13 +668,6 @@ func (state *golistState) createDriverResponse(words ...string) (*driverResponse response.Roots = append(response.Roots, pkg.ID) } - // Work around for pre-go.1.11 versions of go list. - // TODO(matloob): they should be handled by the fallback. - // Can we delete this? - if len(pkg.CompiledGoFiles) == 0 { - pkg.CompiledGoFiles = pkg.GoFiles - } - // Temporary work-around for golang/go#39986. Parse filenames out of // error messages. This happens if there are unrecoverable syntax // errors in the source, so we can't match on a specific error message. @@ -891,6 +889,15 @@ func golistargs(cfg *Config, words []string, goVersion int) []string { // probably because you'd just get the TestMain. fmt.Sprintf("-find=%t", !cfg.Tests && cfg.Mode&findFlags == 0 && !usesExportData(cfg)), } + + // golang/go#60456: with go1.21 and later, go list serves pgo variants, which + // can be costly to compute and may result in redundant processing for the + // caller. Disable these variants. If someone wants to add e.g. a NeedPGO + // mode flag, that should be a separate proposal. + if goVersion >= 21 { + fullargs = append(fullargs, "-pgo=off") + } + fullargs = append(fullargs, cfg.BuildFlags...) fullargs = append(fullargs, "--") fullargs = append(fullargs, words...) diff --git a/vendor/golang.org/x/tools/go/packages/packages.go b/vendor/golang.org/x/tools/go/packages/packages.go index 0f1505b80..632be722a 100644 --- a/vendor/golang.org/x/tools/go/packages/packages.go +++ b/vendor/golang.org/x/tools/go/packages/packages.go @@ -308,6 +308,9 @@ type Package struct { TypeErrors []types.Error // GoFiles lists the absolute file paths of the package's Go source files. + // It may include files that should not be compiled, for example because + // they contain non-matching build tags, are documentary pseudo-files such as + // unsafe/unsafe.go or builtin/builtin.go, or are subject to cgo preprocessing. 
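The go/packages changes above adjust how GoFiles and CompiledGoFiles are populated (and pass -pgo=off on go1.21+). A quick way to observe the distinction the new GoFiles comment describes, assuming an ordinary module environment (the patterns queried here are just an example):

package main

import (
	"fmt"

	"golang.org/x/tools/go/packages"
)

func main() {
	cfg := &packages.Config{
		// NeedFiles populates GoFiles; NeedCompiledGoFiles populates the
		// cgo-processed, build-constrained list described in the comment above.
		Mode: packages.NeedName | packages.NeedFiles | packages.NeedCompiledGoFiles,
	}
	pkgs, err := packages.Load(cfg, "unsafe", "net")
	if err != nil {
		panic(err)
	}
	for _, p := range pkgs {
		fmt.Printf("%s: %d GoFiles, %d CompiledGoFiles\n",
			p.PkgPath, len(p.GoFiles), len(p.CompiledGoFiles))
	}
}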
GoFiles []string // CompiledGoFiles lists the absolute file paths of the package's source diff --git a/vendor/golang.org/x/tools/go/types/objectpath/objectpath.go b/vendor/golang.org/x/tools/go/types/objectpath/objectpath.go deleted file mode 100644 index be8f5a867..000000000 --- a/vendor/golang.org/x/tools/go/types/objectpath/objectpath.go +++ /dev/null @@ -1,762 +0,0 @@ -// Copyright 2018 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Package objectpath defines a naming scheme for types.Objects -// (that is, named entities in Go programs) relative to their enclosing -// package. -// -// Type-checker objects are canonical, so they are usually identified by -// their address in memory (a pointer), but a pointer has meaning only -// within one address space. By contrast, objectpath names allow the -// identity of an object to be sent from one program to another, -// establishing a correspondence between types.Object variables that are -// distinct but logically equivalent. -// -// A single object may have multiple paths. In this example, -// -// type A struct{ X int } -// type B A -// -// the field X has two paths due to its membership of both A and B. -// The For(obj) function always returns one of these paths, arbitrarily -// but consistently. -package objectpath - -import ( - "fmt" - "go/types" - "sort" - "strconv" - "strings" - - "golang.org/x/tools/internal/typeparams" - - _ "unsafe" // for go:linkname -) - -// A Path is an opaque name that identifies a types.Object -// relative to its package. Conceptually, the name consists of a -// sequence of destructuring operations applied to the package scope -// to obtain the original object. -// The name does not include the package itself. -type Path string - -// Encoding -// -// An object path is a textual and (with training) human-readable encoding -// of a sequence of destructuring operators, starting from a types.Package. -// The sequences represent a path through the package/object/type graph. -// We classify these operators by their type: -// -// PO package->object Package.Scope.Lookup -// OT object->type Object.Type -// TT type->type Type.{Elem,Key,Params,Results,Underlying} [EKPRU] -// TO type->object Type.{At,Field,Method,Obj} [AFMO] -// -// All valid paths start with a package and end at an object -// and thus may be defined by the regular language: -// -// objectpath = PO (OT TT* TO)* -// -// The concrete encoding follows directly: -// - The only PO operator is Package.Scope.Lookup, which requires an identifier. -// - The only OT operator is Object.Type, -// which we encode as '.' because dot cannot appear in an identifier. -// - The TT operators are encoded as [EKPRUTC]; -// one of these (TypeParam) requires an integer operand, -// which is encoded as a string of decimal digits. -// - The TO operators are encoded as [AFMO]; -// three of these (At,Field,Method) require an integer operand, -// which is encoded as a string of decimal digits. -// These indices are stable across different representations -// of the same package, even source and export data. -// The indices used are implementation specific and may not correspond to -// the argument to the go/types function. 
-// -// In the example below, -// -// package p -// -// type T interface { -// f() (a string, b struct{ X int }) -// } -// -// field X has the path "T.UM0.RA1.F0", -// representing the following sequence of operations: -// -// p.Lookup("T") T -// .Type().Underlying().Method(0). f -// .Type().Results().At(1) b -// .Type().Field(0) X -// -// The encoding is not maximally compact---every R or P is -// followed by an A, for example---but this simplifies the -// encoder and decoder. -const ( - // object->type operators - opType = '.' // .Type() (Object) - - // type->type operators - opElem = 'E' // .Elem() (Pointer, Slice, Array, Chan, Map) - opKey = 'K' // .Key() (Map) - opParams = 'P' // .Params() (Signature) - opResults = 'R' // .Results() (Signature) - opUnderlying = 'U' // .Underlying() (Named) - opTypeParam = 'T' // .TypeParams.At(i) (Named, Signature) - opConstraint = 'C' // .Constraint() (TypeParam) - - // type->object operators - opAt = 'A' // .At(i) (Tuple) - opField = 'F' // .Field(i) (Struct) - opMethod = 'M' // .Method(i) (Named or Interface; not Struct: "promoted" names are ignored) - opObj = 'O' // .Obj() (Named, TypeParam) -) - -// For returns the path to an object relative to its package, -// or an error if the object is not accessible from the package's Scope. -// -// The For function guarantees to return a path only for the following objects: -// - package-level types -// - exported package-level non-types -// - methods -// - parameter and result variables -// - struct fields -// These objects are sufficient to define the API of their package. -// The objects described by a package's export data are drawn from this set. -// -// For does not return a path for predeclared names, imported package -// names, local names, and unexported package-level names (except -// types). -// -// Example: given this definition, -// -// package p -// -// type T interface { -// f() (a string, b struct{ X int }) -// } -// -// For(X) would return a path that denotes the following sequence of operations: -// -// p.Scope().Lookup("T") (TypeName T) -// .Type().Underlying().Method(0). (method Func f) -// .Type().Results().At(1) (field Var b) -// .Type().Field(0) (field Var X) -// -// where p is the package (*types.Package) to which X belongs. -func For(obj types.Object) (Path, error) { - return newEncoderFor()(obj) -} - -// An encoder amortizes the cost of encoding the paths of multiple objects. -// Nonexported pending approval of proposal 58668. -type encoder struct { - scopeNamesMemo map[*types.Scope][]string // memoization of Scope.Names() - namedMethodsMemo map[*types.Named][]*types.Func // memoization of namedMethods() -} - -// Exposed to gopls via golang.org/x/tools/internal/typesinternal -// pending approval of proposal 58668. -// -//go:linkname newEncoderFor -func newEncoderFor() func(types.Object) (Path, error) { return new(encoder).For } - -func (enc *encoder) For(obj types.Object) (Path, error) { - pkg := obj.Pkg() - - // This table lists the cases of interest. - // - // Object Action - // ------ ------ - // nil reject - // builtin reject - // pkgname reject - // label reject - // var - // package-level accept - // func param/result accept - // local reject - // struct field accept - // const - // package-level accept - // local reject - // func - // package-level accept - // init functions reject - // concrete method accept - // interface method accept - // type - // package-level accept - // local reject - // - // The only accessible package-level objects are members of pkg itself. 
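The object-path scheme documented above is easiest to see with a For/Object round trip. A sketch against the upstream golang.org/x/tools/go/types/objectpath package (which remains available outside this vendor tree; the tiny source snippet and the field lookup are illustrative):

package main

import (
	"fmt"
	"go/ast"
	"go/parser"
	"go/token"
	"go/types"

	"golang.org/x/tools/go/types/objectpath"
)

const src = `package p

type T struct{ X int }
`

func main() {
	fset := token.NewFileSet()
	f, err := parser.ParseFile(fset, "p.go", src, 0)
	if err != nil {
		panic(err)
	}
	var conf types.Config
	pkg, err := conf.Check("p", fset, []*ast.File{f}, nil)
	if err != nil {
		panic(err)
	}

	// The field X: package-level T, then .Type().Underlying().Field(0).
	field := pkg.Scope().Lookup("T").Type().Underlying().(*types.Struct).Field(0)

	path, err := objectpath.For(field)
	if err != nil {
		panic(err)
	}
	fmt.Println(path) // "T.UF0": Lookup("T"), opType, opUnderlying, opField 0

	back, err := objectpath.Object(pkg, path)
	fmt.Println(back == field, err) // true <nil>
}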
- // - // The cases are handled in four steps: - // - // 1. reject nil and builtin - // 2. accept package-level objects - // 3. reject obviously invalid objects - // 4. search the API for the path to the param/result/field/method. - - // 1. reference to nil or builtin? - if pkg == nil { - return "", fmt.Errorf("predeclared %s has no path", obj) - } - scope := pkg.Scope() - - // 2. package-level object? - if scope.Lookup(obj.Name()) == obj { - // Only exported objects (and non-exported types) have a path. - // Non-exported types may be referenced by other objects. - if _, ok := obj.(*types.TypeName); !ok && !obj.Exported() { - return "", fmt.Errorf("no path for non-exported %v", obj) - } - return Path(obj.Name()), nil - } - - // 3. Not a package-level object. - // Reject obviously non-viable cases. - switch obj := obj.(type) { - case *types.TypeName: - if _, ok := obj.Type().(*typeparams.TypeParam); !ok { - // With the exception of type parameters, only package-level type names - // have a path. - return "", fmt.Errorf("no path for %v", obj) - } - case *types.Const, // Only package-level constants have a path. - *types.Label, // Labels are function-local. - *types.PkgName: // PkgNames are file-local. - return "", fmt.Errorf("no path for %v", obj) - - case *types.Var: - // Could be: - // - a field (obj.IsField()) - // - a func parameter or result - // - a local var. - // Sadly there is no way to distinguish - // a param/result from a local - // so we must proceed to the find. - - case *types.Func: - // A func, if not package-level, must be a method. - if recv := obj.Type().(*types.Signature).Recv(); recv == nil { - return "", fmt.Errorf("func is not a method: %v", obj) - } - - if path, ok := enc.concreteMethod(obj); ok { - // Fast path for concrete methods that avoids looping over scope. - return path, nil - } - - default: - panic(obj) - } - - // 4. Search the API for the path to the var (field/param/result) or method. - - // First inspect package-level named types. - // In the presence of path aliases, these give - // the best paths because non-types may - // refer to types, but not the reverse. - empty := make([]byte, 0, 48) // initial space - names := enc.scopeNames(scope) - for _, name := range names { - o := scope.Lookup(name) - tname, ok := o.(*types.TypeName) - if !ok { - continue // handle non-types in second pass - } - - path := append(empty, name...) - path = append(path, opType) - - T := o.Type() - - if tname.IsAlias() { - // type alias - if r := find(obj, T, path, nil); r != nil { - return Path(r), nil - } - } else { - if named, _ := T.(*types.Named); named != nil { - if r := findTypeParam(obj, typeparams.ForNamed(named), path, nil); r != nil { - // generic named type - return Path(r), nil - } - } - // defined (named) type - if r := find(obj, T.Underlying(), append(path, opUnderlying), nil); r != nil { - return Path(r), nil - } - } - } - - // Then inspect everything else: - // non-types, and declared methods of defined types. - for _, name := range names { - o := scope.Lookup(name) - path := append(empty, name...) - if _, ok := o.(*types.TypeName); !ok { - if o.Exported() { - // exported non-type (const, var, func) - if r := find(obj, o.Type(), append(path, opType), nil); r != nil { - return Path(r), nil - } - } - continue - } - - // Inspect declared methods of defined types. 
- if T, ok := o.Type().(*types.Named); ok { - path = append(path, opType) - // Note that method index here is always with respect - // to canonical ordering of methods, regardless of how - // they appear in the underlying type. - for i, m := range enc.namedMethods(T) { - path2 := appendOpArg(path, opMethod, i) - if m == obj { - return Path(path2), nil // found declared method - } - if r := find(obj, m.Type(), append(path2, opType), nil); r != nil { - return Path(r), nil - } - } - } - } - - return "", fmt.Errorf("can't find path for %v in %s", obj, pkg.Path()) -} - -func appendOpArg(path []byte, op byte, arg int) []byte { - path = append(path, op) - path = strconv.AppendInt(path, int64(arg), 10) - return path -} - -// concreteMethod returns the path for meth, which must have a non-nil receiver. -// The second return value indicates success and may be false if the method is -// an interface method or if it is an instantiated method. -// -// This function is just an optimization that avoids the general scope walking -// approach. You are expected to fall back to the general approach if this -// function fails. -func (enc *encoder) concreteMethod(meth *types.Func) (Path, bool) { - // Concrete methods can only be declared on package-scoped named types. For - // that reason we can skip the expensive walk over the package scope: the - // path will always be package -> named type -> method. We can trivially get - // the type name from the receiver, and only have to look over the type's - // methods to find the method index. - // - // Methods on generic types require special consideration, however. Consider - // the following package: - // - // L1: type S[T any] struct{} - // L2: func (recv S[A]) Foo() { recv.Bar() } - // L3: func (recv S[B]) Bar() { } - // L4: type Alias = S[int] - // L5: func _[T any]() { var s S[int]; s.Foo() } - // - // The receivers of methods on generic types are instantiations. L2 and L3 - // instantiate S with the type-parameters A and B, which are scoped to the - // respective methods. L4 and L5 each instantiate S with int. Each of these - // instantiations has its own method set, full of methods (and thus objects) - // with receivers whose types are the respective instantiations. In other - // words, we have - // - // S[A].Foo, S[A].Bar - // S[B].Foo, S[B].Bar - // S[int].Foo, S[int].Bar - // - // We may thus be trying to produce object paths for any of these objects. - // - // S[A].Foo and S[B].Bar are the origin methods, and their paths are S.Foo - // and S.Bar, which are the paths that this function naturally produces. - // - // S[A].Bar, S[B].Foo, and both methods on S[int] are instantiations that - // don't correspond to the origin methods. For S[int], this is significant. - // The most precise object path for S[int].Foo, for example, is Alias.Foo, - // not S.Foo. Our function, however, would produce S.Foo, which would - // resolve to a different object. - // - // For S[A].Bar and S[B].Foo it could be argued that S.Bar and S.Foo are - // still the correct paths, since only the origin methods have meaningful - // paths. But this is likely only true for trivial cases and has edge cases. - // Since this function is only an optimization, we err on the side of giving - // up, deferring to the slower but definitely correct algorithm. Most users - // of objectpath will only be giving us origin methods, anyway, as referring - // to instantiated methods is usually not useful. 
- - if typeparams.OriginMethod(meth) != meth { - return "", false - } - - recvT := meth.Type().(*types.Signature).Recv().Type() - if ptr, ok := recvT.(*types.Pointer); ok { - recvT = ptr.Elem() - } - - named, ok := recvT.(*types.Named) - if !ok { - return "", false - } - - if types.IsInterface(named) { - // Named interfaces don't have to be package-scoped - // - // TODO(dominikh): opt: if scope.Lookup(name) == named, then we can apply this optimization to interface - // methods, too, I think. - return "", false - } - - // Preallocate space for the name, opType, opMethod, and some digits. - name := named.Obj().Name() - path := make([]byte, 0, len(name)+8) - path = append(path, name...) - path = append(path, opType) - for i, m := range enc.namedMethods(named) { - if m == meth { - path = appendOpArg(path, opMethod, i) - return Path(path), true - } - } - - panic(fmt.Sprintf("couldn't find method %s on type %s", meth, named)) -} - -// find finds obj within type T, returning the path to it, or nil if not found. -// -// The seen map is used to short circuit cycles through type parameters. If -// nil, it will be allocated as necessary. -func find(obj types.Object, T types.Type, path []byte, seen map[*types.TypeName]bool) []byte { - switch T := T.(type) { - case *types.Basic, *types.Named: - // Named types belonging to pkg were handled already, - // so T must belong to another package. No path. - return nil - case *types.Pointer: - return find(obj, T.Elem(), append(path, opElem), seen) - case *types.Slice: - return find(obj, T.Elem(), append(path, opElem), seen) - case *types.Array: - return find(obj, T.Elem(), append(path, opElem), seen) - case *types.Chan: - return find(obj, T.Elem(), append(path, opElem), seen) - case *types.Map: - if r := find(obj, T.Key(), append(path, opKey), seen); r != nil { - return r - } - return find(obj, T.Elem(), append(path, opElem), seen) - case *types.Signature: - if r := findTypeParam(obj, typeparams.ForSignature(T), path, seen); r != nil { - return r - } - if r := find(obj, T.Params(), append(path, opParams), seen); r != nil { - return r - } - return find(obj, T.Results(), append(path, opResults), seen) - case *types.Struct: - for i := 0; i < T.NumFields(); i++ { - fld := T.Field(i) - path2 := appendOpArg(path, opField, i) - if fld == obj { - return path2 // found field var - } - if r := find(obj, fld.Type(), append(path2, opType), seen); r != nil { - return r - } - } - return nil - case *types.Tuple: - for i := 0; i < T.Len(); i++ { - v := T.At(i) - path2 := appendOpArg(path, opAt, i) - if v == obj { - return path2 // found param/result var - } - if r := find(obj, v.Type(), append(path2, opType), seen); r != nil { - return r - } - } - return nil - case *types.Interface: - for i := 0; i < T.NumMethods(); i++ { - m := T.Method(i) - path2 := appendOpArg(path, opMethod, i) - if m == obj { - return path2 // found interface method - } - if r := find(obj, m.Type(), append(path2, opType), seen); r != nil { - return r - } - } - return nil - case *typeparams.TypeParam: - name := T.Obj() - if name == obj { - return append(path, opObj) - } - if seen[name] { - return nil - } - if seen == nil { - seen = make(map[*types.TypeName]bool) - } - seen[name] = true - if r := find(obj, T.Constraint(), append(path, opConstraint), seen); r != nil { - return r - } - return nil - } - panic(T) -} - -func findTypeParam(obj types.Object, list *typeparams.TypeParamList, path []byte, seen map[*types.TypeName]bool) []byte { - for i := 0; i < list.Len(); i++ { - tparam := list.At(i) - path2 := 
appendOpArg(path, opTypeParam, i) - if r := find(obj, tparam, path2, seen); r != nil { - return r - } - } - return nil -} - -// Object returns the object denoted by path p within the package pkg. -func Object(pkg *types.Package, p Path) (types.Object, error) { - if p == "" { - return nil, fmt.Errorf("empty path") - } - - pathstr := string(p) - var pkgobj, suffix string - if dot := strings.IndexByte(pathstr, opType); dot < 0 { - pkgobj = pathstr - } else { - pkgobj = pathstr[:dot] - suffix = pathstr[dot:] // suffix starts with "." - } - - obj := pkg.Scope().Lookup(pkgobj) - if obj == nil { - return nil, fmt.Errorf("package %s does not contain %q", pkg.Path(), pkgobj) - } - - // abstraction of *types.{Pointer,Slice,Array,Chan,Map} - type hasElem interface { - Elem() types.Type - } - // abstraction of *types.{Named,Signature} - type hasTypeParams interface { - TypeParams() *typeparams.TypeParamList - } - // abstraction of *types.{Named,TypeParam} - type hasObj interface { - Obj() *types.TypeName - } - - // The loop state is the pair (t, obj), - // exactly one of which is non-nil, initially obj. - // All suffixes start with '.' (the only object->type operation), - // followed by optional type->type operations, - // then a type->object operation. - // The cycle then repeats. - var t types.Type - for suffix != "" { - code := suffix[0] - suffix = suffix[1:] - - // Codes [AFM] have an integer operand. - var index int - switch code { - case opAt, opField, opMethod, opTypeParam: - rest := strings.TrimLeft(suffix, "0123456789") - numerals := suffix[:len(suffix)-len(rest)] - suffix = rest - i, err := strconv.Atoi(numerals) - if err != nil { - return nil, fmt.Errorf("invalid path: bad numeric operand %q for code %q", numerals, code) - } - index = int(i) - case opObj: - // no operand - default: - // The suffix must end with a type->object operation. 
- if suffix == "" { - return nil, fmt.Errorf("invalid path: ends with %q, want [AFMO]", code) - } - } - - if code == opType { - if t != nil { - return nil, fmt.Errorf("invalid path: unexpected %q in type context", opType) - } - t = obj.Type() - obj = nil - continue - } - - if t == nil { - return nil, fmt.Errorf("invalid path: code %q in object context", code) - } - - // Inv: t != nil, obj == nil - - switch code { - case opElem: - hasElem, ok := t.(hasElem) // Pointer, Slice, Array, Chan, Map - if !ok { - return nil, fmt.Errorf("cannot apply %q to %s (got %T, want pointer, slice, array, chan or map)", code, t, t) - } - t = hasElem.Elem() - - case opKey: - mapType, ok := t.(*types.Map) - if !ok { - return nil, fmt.Errorf("cannot apply %q to %s (got %T, want map)", code, t, t) - } - t = mapType.Key() - - case opParams: - sig, ok := t.(*types.Signature) - if !ok { - return nil, fmt.Errorf("cannot apply %q to %s (got %T, want signature)", code, t, t) - } - t = sig.Params() - - case opResults: - sig, ok := t.(*types.Signature) - if !ok { - return nil, fmt.Errorf("cannot apply %q to %s (got %T, want signature)", code, t, t) - } - t = sig.Results() - - case opUnderlying: - named, ok := t.(*types.Named) - if !ok { - return nil, fmt.Errorf("cannot apply %q to %s (got %T, want named)", code, t, t) - } - t = named.Underlying() - - case opTypeParam: - hasTypeParams, ok := t.(hasTypeParams) // Named, Signature - if !ok { - return nil, fmt.Errorf("cannot apply %q to %s (got %T, want named or signature)", code, t, t) - } - tparams := hasTypeParams.TypeParams() - if n := tparams.Len(); index >= n { - return nil, fmt.Errorf("tuple index %d out of range [0-%d)", index, n) - } - t = tparams.At(index) - - case opConstraint: - tparam, ok := t.(*typeparams.TypeParam) - if !ok { - return nil, fmt.Errorf("cannot apply %q to %s (got %T, want type parameter)", code, t, t) - } - t = tparam.Constraint() - - case opAt: - tuple, ok := t.(*types.Tuple) - if !ok { - return nil, fmt.Errorf("cannot apply %q to %s (got %T, want tuple)", code, t, t) - } - if n := tuple.Len(); index >= n { - return nil, fmt.Errorf("tuple index %d out of range [0-%d)", index, n) - } - obj = tuple.At(index) - t = nil - - case opField: - structType, ok := t.(*types.Struct) - if !ok { - return nil, fmt.Errorf("cannot apply %q to %s (got %T, want struct)", code, t, t) - } - if n := structType.NumFields(); index >= n { - return nil, fmt.Errorf("field index %d out of range [0-%d)", index, n) - } - obj = structType.Field(index) - t = nil - - case opMethod: - switch t := t.(type) { - case *types.Interface: - if index >= t.NumMethods() { - return nil, fmt.Errorf("method index %d out of range [0-%d)", index, t.NumMethods()) - } - obj = t.Method(index) // Id-ordered - - case *types.Named: - methods := namedMethods(t) // (unmemoized) - if index >= len(methods) { - return nil, fmt.Errorf("method index %d out of range [0-%d)", index, len(methods)) - } - obj = methods[index] // Id-ordered - - default: - return nil, fmt.Errorf("cannot apply %q to %s (got %T, want interface or named)", code, t, t) - } - t = nil - - case opObj: - hasObj, ok := t.(hasObj) - if !ok { - return nil, fmt.Errorf("cannot apply %q to %s (got %T, want named or type param)", code, t, t) - } - obj = hasObj.Obj() - t = nil - - default: - return nil, fmt.Errorf("invalid path: unknown code %q", code) - } - } - - if obj.Pkg() != pkg { - return nil, fmt.Errorf("path denotes %s, which belongs to a different package", obj) - } - - return obj, nil // success -} - -// namedMethods returns the 
methods of a Named type in ascending Id order. -func namedMethods(named *types.Named) []*types.Func { - methods := make([]*types.Func, named.NumMethods()) - for i := range methods { - methods[i] = named.Method(i) - } - sort.Slice(methods, func(i, j int) bool { - return methods[i].Id() < methods[j].Id() - }) - return methods -} - -// scopeNames is a memoization of scope.Names. Callers must not modify the result. -func (enc *encoder) scopeNames(scope *types.Scope) []string { - m := enc.scopeNamesMemo - if m == nil { - m = make(map[*types.Scope][]string) - enc.scopeNamesMemo = m - } - names, ok := m[scope] - if !ok { - names = scope.Names() // allocates and sorts - m[scope] = names - } - return names -} - -// namedMethods is a memoization of the namedMethods function. Callers must not modify the result. -func (enc *encoder) namedMethods(named *types.Named) []*types.Func { - m := enc.namedMethodsMemo - if m == nil { - m = make(map[*types.Named][]*types.Func) - enc.namedMethodsMemo = m - } - methods, ok := m[named] - if !ok { - methods = namedMethods(named) // allocates and sorts - m[named] = methods - } - return methods - -} diff --git a/vendor/golang.org/x/tools/internal/event/tag/tag.go b/vendor/golang.org/x/tools/internal/event/tag/tag.go new file mode 100644 index 000000000..ff2f2ecd3 --- /dev/null +++ b/vendor/golang.org/x/tools/internal/event/tag/tag.go @@ -0,0 +1,59 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package tag provides the labels used for telemetry throughout gopls. +package tag + +import ( + "golang.org/x/tools/internal/event/keys" +) + +var ( + // create the label keys we use + Method = keys.NewString("method", "") + StatusCode = keys.NewString("status.code", "") + StatusMessage = keys.NewString("status.message", "") + RPCID = keys.NewString("id", "") + RPCDirection = keys.NewString("direction", "") + File = keys.NewString("file", "") + Directory = keys.New("directory", "") + URI = keys.New("URI", "") + Package = keys.NewString("package", "") // Package ID + PackagePath = keys.NewString("package_path", "") + Query = keys.New("query", "") + Snapshot = keys.NewUInt64("snapshot", "") + Operation = keys.NewString("operation", "") + + Position = keys.New("position", "") + Category = keys.NewString("category", "") + PackageCount = keys.NewInt("packages", "") + Files = keys.New("files", "") + Port = keys.NewInt("port", "") + Type = keys.New("type", "") + HoverKind = keys.NewString("hoverkind", "") + + NewServer = keys.NewString("new_server", "A new server was added") + EndServer = keys.NewString("end_server", "A server was shut down") + + ServerID = keys.NewString("server", "The server ID an event is related to") + Logfile = keys.NewString("logfile", "") + DebugAddress = keys.NewString("debug_address", "") + GoplsPath = keys.NewString("gopls_path", "") + ClientID = keys.NewString("client_id", "") + + Level = keys.NewInt("level", "The logging level") +) + +var ( + // create the stats we measure + Started = keys.NewInt64("started", "Count of started RPCs.") + ReceivedBytes = keys.NewInt64("received_bytes", "Bytes received.") //, unit.Bytes) + SentBytes = keys.NewInt64("sent_bytes", "Bytes sent.") //, unit.Bytes) + Latency = keys.NewFloat64("latency_ms", "Elapsed time in milliseconds") //, unit.Milliseconds) +) + +const ( + Inbound = "in" + Outbound = "out" +) diff --git a/vendor/golang.org/x/tools/internal/gcimporter/bexport.go 
b/vendor/golang.org/x/tools/internal/gcimporter/bexport.go deleted file mode 100644 index 30582ed6d..000000000 --- a/vendor/golang.org/x/tools/internal/gcimporter/bexport.go +++ /dev/null @@ -1,852 +0,0 @@ -// Copyright 2016 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Binary package export. -// This file was derived from $GOROOT/src/cmd/compile/internal/gc/bexport.go; -// see that file for specification of the format. - -package gcimporter - -import ( - "bytes" - "encoding/binary" - "fmt" - "go/constant" - "go/token" - "go/types" - "math" - "math/big" - "sort" - "strings" -) - -// If debugFormat is set, each integer and string value is preceded by a marker -// and position information in the encoding. This mechanism permits an importer -// to recognize immediately when it is out of sync. The importer recognizes this -// mode automatically (i.e., it can import export data produced with debugging -// support even if debugFormat is not set at the time of import). This mode will -// lead to massively larger export data (by a factor of 2 to 3) and should only -// be enabled during development and debugging. -// -// NOTE: This flag is the first flag to enable if importing dies because of -// (suspected) format errors, and whenever a change is made to the format. -const debugFormat = false // default: false - -// Current export format version. Increase with each format change. -// -// Note: The latest binary (non-indexed) export format is at version 6. -// This exporter is still at level 4, but it doesn't matter since -// the binary importer can handle older versions just fine. -// -// 6: package height (CL 105038) -- NOT IMPLEMENTED HERE -// 5: improved position encoding efficiency (issue 20080, CL 41619) -- NOT IMPLEMENTED HERE -// 4: type name objects support type aliases, uses aliasTag -// 3: Go1.8 encoding (same as version 2, aliasTag defined but never used) -// 2: removed unused bool in ODCL export (compiler only) -// 1: header format change (more regular), export package for _ struct fields -// 0: Go1.7 encoding -const exportVersion = 4 - -// trackAllTypes enables cycle tracking for all types, not just named -// types. The existing compiler invariants assume that unnamed types -// that are not completely set up are not used, or else there are spurious -// errors. -// If disabled, only named types are tracked, possibly leading to slightly -// less efficient encoding in rare cases. It also prevents the export of -// some corner-case type declarations (but those are not handled correctly -// with with the textual export format either). -// TODO(gri) enable and remove once issues caused by it are fixed -const trackAllTypes = false - -type exporter struct { - fset *token.FileSet - out bytes.Buffer - - // object -> index maps, indexed in order of serialization - strIndex map[string]int - pkgIndex map[*types.Package]int - typIndex map[types.Type]int - - // position encoding - posInfoFormat bool - prevFile string - prevLine int - - // debugging support - written int // bytes written - indent int // for trace -} - -// internalError represents an error generated inside this package. -type internalError string - -func (e internalError) Error() string { return "gcimporter: " + string(e) } - -func internalErrorf(format string, args ...interface{}) error { - return internalError(fmt.Sprintf(format, args...)) -} - -// BExportData returns binary export data for pkg. 
-// If no file set is provided, position info will be missing. -func BExportData(fset *token.FileSet, pkg *types.Package) (b []byte, err error) { - if !debug { - defer func() { - if e := recover(); e != nil { - if ierr, ok := e.(internalError); ok { - err = ierr - return - } - // Not an internal error; panic again. - panic(e) - } - }() - } - - p := exporter{ - fset: fset, - strIndex: map[string]int{"": 0}, // empty string is mapped to 0 - pkgIndex: make(map[*types.Package]int), - typIndex: make(map[types.Type]int), - posInfoFormat: true, // TODO(gri) might become a flag, eventually - } - - // write version info - // The version string must start with "version %d" where %d is the version - // number. Additional debugging information may follow after a blank; that - // text is ignored by the importer. - p.rawStringln(fmt.Sprintf("version %d", exportVersion)) - var debug string - if debugFormat { - debug = "debug" - } - p.rawStringln(debug) // cannot use p.bool since it's affected by debugFormat; also want to see this clearly - p.bool(trackAllTypes) - p.bool(p.posInfoFormat) - - // --- generic export data --- - - // populate type map with predeclared "known" types - for index, typ := range predeclared() { - p.typIndex[typ] = index - } - if len(p.typIndex) != len(predeclared()) { - return nil, internalError("duplicate entries in type map?") - } - - // write package data - p.pkg(pkg, true) - if trace { - p.tracef("\n") - } - - // write objects - objcount := 0 - scope := pkg.Scope() - for _, name := range scope.Names() { - if !token.IsExported(name) { - continue - } - if trace { - p.tracef("\n") - } - p.obj(scope.Lookup(name)) - objcount++ - } - - // indicate end of list - if trace { - p.tracef("\n") - } - p.tag(endTag) - - // for self-verification only (redundant) - p.int(objcount) - - if trace { - p.tracef("\n") - } - - // --- end of export data --- - - return p.out.Bytes(), nil -} - -func (p *exporter) pkg(pkg *types.Package, emptypath bool) { - if pkg == nil { - panic(internalError("unexpected nil pkg")) - } - - // if we saw the package before, write its index (>= 0) - if i, ok := p.pkgIndex[pkg]; ok { - p.index('P', i) - return - } - - // otherwise, remember the package, write the package tag (< 0) and package data - if trace { - p.tracef("P%d = { ", len(p.pkgIndex)) - defer p.tracef("} ") - } - p.pkgIndex[pkg] = len(p.pkgIndex) - - p.tag(packageTag) - p.string(pkg.Name()) - if emptypath { - p.string("") - } else { - p.string(pkg.Path()) - } -} - -func (p *exporter) obj(obj types.Object) { - switch obj := obj.(type) { - case *types.Const: - p.tag(constTag) - p.pos(obj) - p.qualifiedName(obj) - p.typ(obj.Type()) - p.value(obj.Val()) - - case *types.TypeName: - if obj.IsAlias() { - p.tag(aliasTag) - p.pos(obj) - p.qualifiedName(obj) - } else { - p.tag(typeTag) - } - p.typ(obj.Type()) - - case *types.Var: - p.tag(varTag) - p.pos(obj) - p.qualifiedName(obj) - p.typ(obj.Type()) - - case *types.Func: - p.tag(funcTag) - p.pos(obj) - p.qualifiedName(obj) - sig := obj.Type().(*types.Signature) - p.paramList(sig.Params(), sig.Variadic()) - p.paramList(sig.Results(), false) - - default: - panic(internalErrorf("unexpected object %v (%T)", obj, obj)) - } -} - -func (p *exporter) pos(obj types.Object) { - if !p.posInfoFormat { - return - } - - file, line := p.fileLine(obj) - if file == p.prevFile { - // common case: write line delta - // delta == 0 means different file or no line change - delta := line - p.prevLine - p.int(delta) - if delta == 0 { - p.int(-1) // -1 means no file change - } - } else { - 
// different file - p.int(0) - // Encode filename as length of common prefix with previous - // filename, followed by (possibly empty) suffix. Filenames - // frequently share path prefixes, so this can save a lot - // of space and make export data size less dependent on file - // path length. The suffix is unlikely to be empty because - // file names tend to end in ".go". - n := commonPrefixLen(p.prevFile, file) - p.int(n) // n >= 0 - p.string(file[n:]) // write suffix only - p.prevFile = file - p.int(line) - } - p.prevLine = line -} - -func (p *exporter) fileLine(obj types.Object) (file string, line int) { - if p.fset != nil { - pos := p.fset.Position(obj.Pos()) - file = pos.Filename - line = pos.Line - } - return -} - -func commonPrefixLen(a, b string) int { - if len(a) > len(b) { - a, b = b, a - } - // len(a) <= len(b) - i := 0 - for i < len(a) && a[i] == b[i] { - i++ - } - return i -} - -func (p *exporter) qualifiedName(obj types.Object) { - p.string(obj.Name()) - p.pkg(obj.Pkg(), false) -} - -func (p *exporter) typ(t types.Type) { - if t == nil { - panic(internalError("nil type")) - } - - // Possible optimization: Anonymous pointer types *T where - // T is a named type are common. We could canonicalize all - // such types *T to a single type PT = *T. This would lead - // to at most one *T entry in typIndex, and all future *T's - // would be encoded as the respective index directly. Would - // save 1 byte (pointerTag) per *T and reduce the typIndex - // size (at the cost of a canonicalization map). We can do - // this later, without encoding format change. - - // if we saw the type before, write its index (>= 0) - if i, ok := p.typIndex[t]; ok { - p.index('T', i) - return - } - - // otherwise, remember the type, write the type tag (< 0) and type data - if trackAllTypes { - if trace { - p.tracef("T%d = {>\n", len(p.typIndex)) - defer p.tracef("<\n} ") - } - p.typIndex[t] = len(p.typIndex) - } - - switch t := t.(type) { - case *types.Named: - if !trackAllTypes { - // if we don't track all types, track named types now - p.typIndex[t] = len(p.typIndex) - } - - p.tag(namedTag) - p.pos(t.Obj()) - p.qualifiedName(t.Obj()) - p.typ(t.Underlying()) - if !types.IsInterface(t) { - p.assocMethods(t) - } - - case *types.Array: - p.tag(arrayTag) - p.int64(t.Len()) - p.typ(t.Elem()) - - case *types.Slice: - p.tag(sliceTag) - p.typ(t.Elem()) - - case *dddSlice: - p.tag(dddTag) - p.typ(t.elem) - - case *types.Struct: - p.tag(structTag) - p.fieldList(t) - - case *types.Pointer: - p.tag(pointerTag) - p.typ(t.Elem()) - - case *types.Signature: - p.tag(signatureTag) - p.paramList(t.Params(), t.Variadic()) - p.paramList(t.Results(), false) - - case *types.Interface: - p.tag(interfaceTag) - p.iface(t) - - case *types.Map: - p.tag(mapTag) - p.typ(t.Key()) - p.typ(t.Elem()) - - case *types.Chan: - p.tag(chanTag) - p.int(int(3 - t.Dir())) // hack - p.typ(t.Elem()) - - default: - panic(internalErrorf("unexpected type %T: %s", t, t)) - } -} - -func (p *exporter) assocMethods(named *types.Named) { - // Sort methods (for determinism). 
- var methods []*types.Func - for i := 0; i < named.NumMethods(); i++ { - methods = append(methods, named.Method(i)) - } - sort.Sort(methodsByName(methods)) - - p.int(len(methods)) - - if trace && methods != nil { - p.tracef("associated methods {>\n") - } - - for i, m := range methods { - if trace && i > 0 { - p.tracef("\n") - } - - p.pos(m) - name := m.Name() - p.string(name) - if !exported(name) { - p.pkg(m.Pkg(), false) - } - - sig := m.Type().(*types.Signature) - p.paramList(types.NewTuple(sig.Recv()), false) - p.paramList(sig.Params(), sig.Variadic()) - p.paramList(sig.Results(), false) - p.int(0) // dummy value for go:nointerface pragma - ignored by importer - } - - if trace && methods != nil { - p.tracef("<\n} ") - } -} - -type methodsByName []*types.Func - -func (x methodsByName) Len() int { return len(x) } -func (x methodsByName) Swap(i, j int) { x[i], x[j] = x[j], x[i] } -func (x methodsByName) Less(i, j int) bool { return x[i].Name() < x[j].Name() } - -func (p *exporter) fieldList(t *types.Struct) { - if trace && t.NumFields() > 0 { - p.tracef("fields {>\n") - defer p.tracef("<\n} ") - } - - p.int(t.NumFields()) - for i := 0; i < t.NumFields(); i++ { - if trace && i > 0 { - p.tracef("\n") - } - p.field(t.Field(i)) - p.string(t.Tag(i)) - } -} - -func (p *exporter) field(f *types.Var) { - if !f.IsField() { - panic(internalError("field expected")) - } - - p.pos(f) - p.fieldName(f) - p.typ(f.Type()) -} - -func (p *exporter) iface(t *types.Interface) { - // TODO(gri): enable importer to load embedded interfaces, - // then emit Embeddeds and ExplicitMethods separately here. - p.int(0) - - n := t.NumMethods() - if trace && n > 0 { - p.tracef("methods {>\n") - defer p.tracef("<\n} ") - } - p.int(n) - for i := 0; i < n; i++ { - if trace && i > 0 { - p.tracef("\n") - } - p.method(t.Method(i)) - } -} - -func (p *exporter) method(m *types.Func) { - sig := m.Type().(*types.Signature) - if sig.Recv() == nil { - panic(internalError("method expected")) - } - - p.pos(m) - p.string(m.Name()) - if m.Name() != "_" && !token.IsExported(m.Name()) { - p.pkg(m.Pkg(), false) - } - - // interface method; no need to encode receiver. - p.paramList(sig.Params(), sig.Variadic()) - p.paramList(sig.Results(), false) -} - -func (p *exporter) fieldName(f *types.Var) { - name := f.Name() - - if f.Anonymous() { - // anonymous field - we distinguish between 3 cases: - // 1) field name matches base type name and is exported - // 2) field name matches base type name and is not exported - // 3) field name doesn't match base type name (alias name) - bname := basetypeName(f.Type()) - if name == bname { - if token.IsExported(name) { - name = "" // 1) we don't need to know the field name or package - } else { - name = "?" // 2) use unexported name "?" 
to force package export - } - } else { - // 3) indicate alias and export name as is - // (this requires an extra "@" but this is a rare case) - p.string("@") - } - } - - p.string(name) - if name != "" && !token.IsExported(name) { - p.pkg(f.Pkg(), false) - } -} - -func basetypeName(typ types.Type) string { - switch typ := deref(typ).(type) { - case *types.Basic: - return typ.Name() - case *types.Named: - return typ.Obj().Name() - default: - return "" // unnamed type - } -} - -func (p *exporter) paramList(params *types.Tuple, variadic bool) { - // use negative length to indicate unnamed parameters - // (look at the first parameter only since either all - // names are present or all are absent) - n := params.Len() - if n > 0 && params.At(0).Name() == "" { - n = -n - } - p.int(n) - for i := 0; i < params.Len(); i++ { - q := params.At(i) - t := q.Type() - if variadic && i == params.Len()-1 { - t = &dddSlice{t.(*types.Slice).Elem()} - } - p.typ(t) - if n > 0 { - name := q.Name() - p.string(name) - if name != "_" { - p.pkg(q.Pkg(), false) - } - } - p.string("") // no compiler-specific info - } -} - -func (p *exporter) value(x constant.Value) { - if trace { - p.tracef("= ") - } - - switch x.Kind() { - case constant.Bool: - tag := falseTag - if constant.BoolVal(x) { - tag = trueTag - } - p.tag(tag) - - case constant.Int: - if v, exact := constant.Int64Val(x); exact { - // common case: x fits into an int64 - use compact encoding - p.tag(int64Tag) - p.int64(v) - return - } - // uncommon case: large x - use float encoding - // (powers of 2 will be encoded efficiently with exponent) - p.tag(floatTag) - p.float(constant.ToFloat(x)) - - case constant.Float: - p.tag(floatTag) - p.float(x) - - case constant.Complex: - p.tag(complexTag) - p.float(constant.Real(x)) - p.float(constant.Imag(x)) - - case constant.String: - p.tag(stringTag) - p.string(constant.StringVal(x)) - - case constant.Unknown: - // package contains type errors - p.tag(unknownTag) - - default: - panic(internalErrorf("unexpected value %v (%T)", x, x)) - } -} - -func (p *exporter) float(x constant.Value) { - if x.Kind() != constant.Float { - panic(internalErrorf("unexpected constant %v, want float", x)) - } - // extract sign (there is no -0) - sign := constant.Sign(x) - if sign == 0 { - // x == 0 - p.int(0) - return - } - // x != 0 - - var f big.Float - if v, exact := constant.Float64Val(x); exact { - // float64 - f.SetFloat64(v) - } else if num, denom := constant.Num(x), constant.Denom(x); num.Kind() == constant.Int { - // TODO(gri): add big.Rat accessor to constant.Value. - r := valueToRat(num) - f.SetRat(r.Quo(r, valueToRat(denom))) - } else { - // Value too large to represent as a fraction => inaccessible. - // TODO(gri): add big.Float accessor to constant.Value. - f.SetFloat64(math.MaxFloat64) // FIXME - } - - // extract exponent such that 0.5 <= m < 1.0 - var m big.Float - exp := f.MantExp(&m) - - // extract mantissa as *big.Int - // - set exponent large enough so mant satisfies mant.IsInt() - // - get *big.Int from mant - m.SetMantExp(&m, int(m.MinPrec())) - mant, acc := m.Int(nil) - if acc != big.Exact { - panic(internalError("internal error")) - } - - p.int(sign) - p.int(exp) - p.string(string(mant.Bytes())) -} - -func valueToRat(x constant.Value) *big.Rat { - // Convert little-endian to big-endian. - // I can't believe this is necessary. 
- bytes := constant.Bytes(x) - for i := 0; i < len(bytes)/2; i++ { - bytes[i], bytes[len(bytes)-1-i] = bytes[len(bytes)-1-i], bytes[i] - } - return new(big.Rat).SetInt(new(big.Int).SetBytes(bytes)) -} - -func (p *exporter) bool(b bool) bool { - if trace { - p.tracef("[") - defer p.tracef("= %v] ", b) - } - - x := 0 - if b { - x = 1 - } - p.int(x) - return b -} - -// ---------------------------------------------------------------------------- -// Low-level encoders - -func (p *exporter) index(marker byte, index int) { - if index < 0 { - panic(internalError("invalid index < 0")) - } - if debugFormat { - p.marker('t') - } - if trace { - p.tracef("%c%d ", marker, index) - } - p.rawInt64(int64(index)) -} - -func (p *exporter) tag(tag int) { - if tag >= 0 { - panic(internalError("invalid tag >= 0")) - } - if debugFormat { - p.marker('t') - } - if trace { - p.tracef("%s ", tagString[-tag]) - } - p.rawInt64(int64(tag)) -} - -func (p *exporter) int(x int) { - p.int64(int64(x)) -} - -func (p *exporter) int64(x int64) { - if debugFormat { - p.marker('i') - } - if trace { - p.tracef("%d ", x) - } - p.rawInt64(x) -} - -func (p *exporter) string(s string) { - if debugFormat { - p.marker('s') - } - if trace { - p.tracef("%q ", s) - } - // if we saw the string before, write its index (>= 0) - // (the empty string is mapped to 0) - if i, ok := p.strIndex[s]; ok { - p.rawInt64(int64(i)) - return - } - // otherwise, remember string and write its negative length and bytes - p.strIndex[s] = len(p.strIndex) - p.rawInt64(-int64(len(s))) - for i := 0; i < len(s); i++ { - p.rawByte(s[i]) - } -} - -// marker emits a marker byte and position information which makes -// it easy for a reader to detect if it is "out of sync". Used for -// debugFormat format only. -func (p *exporter) marker(m byte) { - p.rawByte(m) - // Enable this for help tracking down the location - // of an incorrect marker when running in debugFormat. - if false && trace { - p.tracef("#%d ", p.written) - } - p.rawInt64(int64(p.written)) -} - -// rawInt64 should only be used by low-level encoders. -func (p *exporter) rawInt64(x int64) { - var tmp [binary.MaxVarintLen64]byte - n := binary.PutVarint(tmp[:], x) - for i := 0; i < n; i++ { - p.rawByte(tmp[i]) - } -} - -// rawStringln should only be used to emit the initial version string. -func (p *exporter) rawStringln(s string) { - for i := 0; i < len(s); i++ { - p.rawByte(s[i]) - } - p.rawByte('\n') -} - -// rawByte is the bottleneck interface to write to p.out. -// rawByte escapes b as follows (any encoding does that -// hides '$'): -// -// '$' => '|' 'S' -// '|' => '|' '|' -// -// Necessary so other tools can find the end of the -// export data by searching for "$$". -// rawByte should only be used by low-level encoders. -func (p *exporter) rawByte(b byte) { - switch b { - case '$': - // write '$' as '|' 'S' - b = 'S' - fallthrough - case '|': - // write '|' as '|' '|' - p.out.WriteByte('|') - p.written++ - } - p.out.WriteByte(b) - p.written++ -} - -// tracef is like fmt.Printf but it rewrites the format string -// to take care of indentation. -func (p *exporter) tracef(format string, args ...interface{}) { - if strings.ContainsAny(format, "<>\n") { - var buf bytes.Buffer - for i := 0; i < len(format); i++ { - // no need to deal with runes - ch := format[i] - switch ch { - case '>': - p.indent++ - continue - case '<': - p.indent-- - continue - } - buf.WriteByte(ch) - if ch == '\n' { - for j := p.indent; j > 0; j-- { - buf.WriteString(". 
") - } - } - } - format = buf.String() - } - fmt.Printf(format, args...) -} - -// Debugging support. -// (tagString is only used when tracing is enabled) -var tagString = [...]string{ - // Packages - -packageTag: "package", - - // Types - -namedTag: "named type", - -arrayTag: "array", - -sliceTag: "slice", - -dddTag: "ddd", - -structTag: "struct", - -pointerTag: "pointer", - -signatureTag: "signature", - -interfaceTag: "interface", - -mapTag: "map", - -chanTag: "chan", - - // Values - -falseTag: "false", - -trueTag: "true", - -int64Tag: "int64", - -floatTag: "float", - -fractionTag: "fraction", - -complexTag: "complex", - -stringTag: "string", - -unknownTag: "unknown", - - // Type aliases - -aliasTag: "alias", -} diff --git a/vendor/golang.org/x/tools/internal/gcimporter/bimport.go b/vendor/golang.org/x/tools/internal/gcimporter/bimport.go index b85de0147..d98b0db2a 100644 --- a/vendor/golang.org/x/tools/internal/gcimporter/bimport.go +++ b/vendor/golang.org/x/tools/internal/gcimporter/bimport.go @@ -2,340 +2,24 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -// This file is a copy of $GOROOT/src/go/internal/gcimporter/bimport.go. +// This file contains the remaining vestiges of +// $GOROOT/src/go/internal/gcimporter/bimport.go. package gcimporter import ( - "encoding/binary" "fmt" - "go/constant" "go/token" "go/types" - "sort" - "strconv" - "strings" "sync" - "unicode" - "unicode/utf8" ) -type importer struct { - imports map[string]*types.Package - data []byte - importpath string - buf []byte // for reading strings - version int // export format version - - // object lists - strList []string // in order of appearance - pathList []string // in order of appearance - pkgList []*types.Package // in order of appearance - typList []types.Type // in order of appearance - interfaceList []*types.Interface // for delayed completion only - trackAllTypes bool - - // position encoding - posInfoFormat bool - prevFile string - prevLine int - fake fakeFileSet - - // debugging support - debugFormat bool - read int // bytes read -} - -// BImportData imports a package from the serialized package data -// and returns the number of bytes consumed and a reference to the package. -// If the export data version is not recognized or the format is otherwise -// compromised, an error is returned. -func BImportData(fset *token.FileSet, imports map[string]*types.Package, data []byte, path string) (_ int, pkg *types.Package, err error) { - // catch panics and return them as errors - const currentVersion = 6 - version := -1 // unknown version - defer func() { - if e := recover(); e != nil { - // Return a (possibly nil or incomplete) package unchanged (see #16088). - if version > currentVersion { - err = fmt.Errorf("cannot import %q (%v), export data is newer version - update tool", path, e) - } else { - err = fmt.Errorf("cannot import %q (%v), possibly version skew - reinstall package", path, e) - } - } - }() - - p := importer{ - imports: imports, - data: data, - importpath: path, - version: version, - strList: []string{""}, // empty string is mapped to 0 - pathList: []string{""}, // empty string is mapped to 0 - fake: fakeFileSet{ - fset: fset, - files: make(map[string]*fileInfo), - }, - } - defer p.fake.setLines() // set lines for files in fset - - // read version info - var versionstr string - if b := p.rawByte(); b == 'c' || b == 'd' { - // Go1.7 encoding; first byte encodes low-level - // encoding format (compact vs debug). 
- // For backward-compatibility only (avoid problems with - // old installed packages). Newly compiled packages use - // the extensible format string. - // TODO(gri) Remove this support eventually; after Go1.8. - if b == 'd' { - p.debugFormat = true - } - p.trackAllTypes = p.rawByte() == 'a' - p.posInfoFormat = p.int() != 0 - versionstr = p.string() - if versionstr == "v1" { - version = 0 - } - } else { - // Go1.8 extensible encoding - // read version string and extract version number (ignore anything after the version number) - versionstr = p.rawStringln(b) - if s := strings.SplitN(versionstr, " ", 3); len(s) >= 2 && s[0] == "version" { - if v, err := strconv.Atoi(s[1]); err == nil && v > 0 { - version = v - } - } - } - p.version = version - - // read version specific flags - extend as necessary - switch p.version { - // case currentVersion: - // ... - // fallthrough - case currentVersion, 5, 4, 3, 2, 1: - p.debugFormat = p.rawStringln(p.rawByte()) == "debug" - p.trackAllTypes = p.int() != 0 - p.posInfoFormat = p.int() != 0 - case 0: - // Go1.7 encoding format - nothing to do here - default: - errorf("unknown bexport format version %d (%q)", p.version, versionstr) - } - - // --- generic export data --- - - // populate typList with predeclared "known" types - p.typList = append(p.typList, predeclared()...) - - // read package data - pkg = p.pkg() - - // read objects of phase 1 only (see cmd/compile/internal/gc/bexport.go) - objcount := 0 - for { - tag := p.tagOrIndex() - if tag == endTag { - break - } - p.obj(tag) - objcount++ - } - - // self-verification - if count := p.int(); count != objcount { - errorf("got %d objects; want %d", objcount, count) - } - - // ignore compiler-specific import data - - // complete interfaces - // TODO(gri) re-investigate if we still need to do this in a delayed fashion - for _, typ := range p.interfaceList { - typ.Complete() - } - - // record all referenced packages as imports - list := append(([]*types.Package)(nil), p.pkgList[1:]...) 
- sort.Sort(byPath(list)) - pkg.SetImports(list) - - // package was imported completely and without errors - pkg.MarkComplete() - - return p.read, pkg, nil -} - func errorf(format string, args ...interface{}) { panic(fmt.Sprintf(format, args...)) } -func (p *importer) pkg() *types.Package { - // if the package was seen before, i is its index (>= 0) - i := p.tagOrIndex() - if i >= 0 { - return p.pkgList[i] - } - - // otherwise, i is the package tag (< 0) - if i != packageTag { - errorf("unexpected package tag %d version %d", i, p.version) - } - - // read package data - name := p.string() - var path string - if p.version >= 5 { - path = p.path() - } else { - path = p.string() - } - if p.version >= 6 { - p.int() // package height; unused by go/types - } - - // we should never see an empty package name - if name == "" { - errorf("empty package name in import") - } - - // an empty path denotes the package we are currently importing; - // it must be the first package we see - if (path == "") != (len(p.pkgList) == 0) { - errorf("package path %q for pkg index %d", path, len(p.pkgList)) - } - - // if the package was imported before, use that one; otherwise create a new one - if path == "" { - path = p.importpath - } - pkg := p.imports[path] - if pkg == nil { - pkg = types.NewPackage(path, name) - p.imports[path] = pkg - } else if pkg.Name() != name { - errorf("conflicting names %s and %s for package %q", pkg.Name(), name, path) - } - p.pkgList = append(p.pkgList, pkg) - - return pkg -} - -// objTag returns the tag value for each object kind. -func objTag(obj types.Object) int { - switch obj.(type) { - case *types.Const: - return constTag - case *types.TypeName: - return typeTag - case *types.Var: - return varTag - case *types.Func: - return funcTag - default: - errorf("unexpected object: %v (%T)", obj, obj) // panics - panic("unreachable") - } -} - -func sameObj(a, b types.Object) bool { - // Because unnamed types are not canonicalized, we cannot simply compare types for - // (pointer) identity. - // Ideally we'd check equality of constant values as well, but this is good enough. - return objTag(a) == objTag(b) && types.Identical(a.Type(), b.Type()) -} - -func (p *importer) declare(obj types.Object) { - pkg := obj.Pkg() - if alt := pkg.Scope().Insert(obj); alt != nil { - // This can only trigger if we import a (non-type) object a second time. - // Excluding type aliases, this cannot happen because 1) we only import a package - // once; and b) we ignore compiler-specific export data which may contain - // functions whose inlined function bodies refer to other functions that - // were already imported. - // However, type aliases require reexporting the original type, so we need - // to allow it (see also the comment in cmd/compile/internal/gc/bimport.go, - // method importer.obj, switch case importing functions). - // TODO(gri) review/update this comment once the gc compiler handles type aliases. 
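declare() above relies on the go/types scope contract that Insert is a no-op returning the existing object when the name is already present; that is what makes the sameObj consistency check possible. A tiny illustration of that contract (scope name, variable names, and types chosen arbitrarily):

package main

import (
	"fmt"
	"go/token"
	"go/types"
)

func main() {
	scope := types.NewScope(types.Universe, token.NoPos, token.NoPos, "demo")

	a := types.NewVar(token.NoPos, nil, "x", types.Typ[types.Int])
	b := types.NewVar(token.NoPos, nil, "x", types.Typ[types.String])

	fmt.Println(scope.Insert(a))        // <nil>: "x" was not present, a is now in the scope
	alt := scope.Insert(b)              // returns a; b is NOT inserted
	fmt.Println(alt == a)               // true
	fmt.Println(scope.Lookup("x") == a) // true
}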
- if !sameObj(obj, alt) { - errorf("inconsistent import:\n\t%v\npreviously imported as:\n\t%v\n", obj, alt) - } - } -} - -func (p *importer) obj(tag int) { - switch tag { - case constTag: - pos := p.pos() - pkg, name := p.qualifiedName() - typ := p.typ(nil, nil) - val := p.value() - p.declare(types.NewConst(pos, pkg, name, typ, val)) - - case aliasTag: - // TODO(gri) verify type alias hookup is correct - pos := p.pos() - pkg, name := p.qualifiedName() - typ := p.typ(nil, nil) - p.declare(types.NewTypeName(pos, pkg, name, typ)) - - case typeTag: - p.typ(nil, nil) - - case varTag: - pos := p.pos() - pkg, name := p.qualifiedName() - typ := p.typ(nil, nil) - p.declare(types.NewVar(pos, pkg, name, typ)) - - case funcTag: - pos := p.pos() - pkg, name := p.qualifiedName() - params, isddd := p.paramList() - result, _ := p.paramList() - sig := types.NewSignature(nil, params, result, isddd) - p.declare(types.NewFunc(pos, pkg, name, sig)) - - default: - errorf("unexpected object tag %d", tag) - } -} - const deltaNewFile = -64 // see cmd/compile/internal/gc/bexport.go -func (p *importer) pos() token.Pos { - if !p.posInfoFormat { - return token.NoPos - } - - file := p.prevFile - line := p.prevLine - delta := p.int() - line += delta - if p.version >= 5 { - if delta == deltaNewFile { - if n := p.int(); n >= 0 { - // file changed - file = p.path() - line = n - } - } - } else { - if delta == 0 { - if n := p.int(); n >= 0 { - // file changed - file = p.prevFile[:n] + p.string() - line = p.int() - } - } - } - p.prevFile = file - p.prevLine = line - - return p.fake.pos(file, line, 0) -} - // Synthesize a token.Pos type fakeFileSet struct { fset *token.FileSet @@ -389,205 +73,6 @@ var ( fakeLinesOnce sync.Once ) -func (p *importer) qualifiedName() (pkg *types.Package, name string) { - name = p.string() - pkg = p.pkg() - return -} - -func (p *importer) record(t types.Type) { - p.typList = append(p.typList, t) -} - -// A dddSlice is a types.Type representing ...T parameters. -// It only appears for parameter types and does not escape -// the importer. -type dddSlice struct { - elem types.Type -} - -func (t *dddSlice) Underlying() types.Type { return t } -func (t *dddSlice) String() string { return "..." + t.elem.String() } - -// parent is the package which declared the type; parent == nil means -// the package currently imported. The parent package is needed for -// exported struct fields and interface methods which don't contain -// explicit package information in the export data. -// -// A non-nil tname is used as the "owner" of the result type; i.e., -// the result type is the underlying type of tname. tname is used -// to give interface methods a named receiver type where possible. 
-func (p *importer) typ(parent *types.Package, tname *types.Named) types.Type { - // if the type was seen before, i is its index (>= 0) - i := p.tagOrIndex() - if i >= 0 { - return p.typList[i] - } - - // otherwise, i is the type tag (< 0) - switch i { - case namedTag: - // read type object - pos := p.pos() - parent, name := p.qualifiedName() - scope := parent.Scope() - obj := scope.Lookup(name) - - // if the object doesn't exist yet, create and insert it - if obj == nil { - obj = types.NewTypeName(pos, parent, name, nil) - scope.Insert(obj) - } - - if _, ok := obj.(*types.TypeName); !ok { - errorf("pkg = %s, name = %s => %s", parent, name, obj) - } - - // associate new named type with obj if it doesn't exist yet - t0 := types.NewNamed(obj.(*types.TypeName), nil, nil) - - // but record the existing type, if any - tname := obj.Type().(*types.Named) // tname is either t0 or the existing type - p.record(tname) - - // read underlying type - t0.SetUnderlying(p.typ(parent, t0)) - - // interfaces don't have associated methods - if types.IsInterface(t0) { - return tname - } - - // read associated methods - for i := p.int(); i > 0; i-- { - // TODO(gri) replace this with something closer to fieldName - pos := p.pos() - name := p.string() - if !exported(name) { - p.pkg() - } - - recv, _ := p.paramList() // TODO(gri) do we need a full param list for the receiver? - params, isddd := p.paramList() - result, _ := p.paramList() - p.int() // go:nointerface pragma - discarded - - sig := types.NewSignature(recv.At(0), params, result, isddd) - t0.AddMethod(types.NewFunc(pos, parent, name, sig)) - } - - return tname - - case arrayTag: - t := new(types.Array) - if p.trackAllTypes { - p.record(t) - } - - n := p.int64() - *t = *types.NewArray(p.typ(parent, nil), n) - return t - - case sliceTag: - t := new(types.Slice) - if p.trackAllTypes { - p.record(t) - } - - *t = *types.NewSlice(p.typ(parent, nil)) - return t - - case dddTag: - t := new(dddSlice) - if p.trackAllTypes { - p.record(t) - } - - t.elem = p.typ(parent, nil) - return t - - case structTag: - t := new(types.Struct) - if p.trackAllTypes { - p.record(t) - } - - *t = *types.NewStruct(p.fieldList(parent)) - return t - - case pointerTag: - t := new(types.Pointer) - if p.trackAllTypes { - p.record(t) - } - - *t = *types.NewPointer(p.typ(parent, nil)) - return t - - case signatureTag: - t := new(types.Signature) - if p.trackAllTypes { - p.record(t) - } - - params, isddd := p.paramList() - result, _ := p.paramList() - *t = *types.NewSignature(nil, params, result, isddd) - return t - - case interfaceTag: - // Create a dummy entry in the type list. This is safe because we - // cannot expect the interface type to appear in a cycle, as any - // such cycle must contain a named type which would have been - // first defined earlier. - // TODO(gri) Is this still true now that we have type aliases? - // See issue #23225. 
- n := len(p.typList) - if p.trackAllTypes { - p.record(nil) - } - - var embeddeds []types.Type - for n := p.int(); n > 0; n-- { - p.pos() - embeddeds = append(embeddeds, p.typ(parent, nil)) - } - - t := newInterface(p.methodList(parent, tname), embeddeds) - p.interfaceList = append(p.interfaceList, t) - if p.trackAllTypes { - p.typList[n] = t - } - return t - - case mapTag: - t := new(types.Map) - if p.trackAllTypes { - p.record(t) - } - - key := p.typ(parent, nil) - val := p.typ(parent, nil) - *t = *types.NewMap(key, val) - return t - - case chanTag: - t := new(types.Chan) - if p.trackAllTypes { - p.record(t) - } - - dir := chanDir(p.int()) - val := p.typ(parent, nil) - *t = *types.NewChan(dir, val) - return t - - default: - errorf("unexpected type tag %d", i) // panics - panic("unreachable") - } -} - func chanDir(d int) types.ChanDir { // tag values must match the constants in cmd/compile/internal/gc/go.go switch d { @@ -603,394 +88,6 @@ func chanDir(d int) types.ChanDir { } } -func (p *importer) fieldList(parent *types.Package) (fields []*types.Var, tags []string) { - if n := p.int(); n > 0 { - fields = make([]*types.Var, n) - tags = make([]string, n) - for i := range fields { - fields[i], tags[i] = p.field(parent) - } - } - return -} - -func (p *importer) field(parent *types.Package) (*types.Var, string) { - pos := p.pos() - pkg, name, alias := p.fieldName(parent) - typ := p.typ(parent, nil) - tag := p.string() - - anonymous := false - if name == "" { - // anonymous field - typ must be T or *T and T must be a type name - switch typ := deref(typ).(type) { - case *types.Basic: // basic types are named types - pkg = nil // // objects defined in Universe scope have no package - name = typ.Name() - case *types.Named: - name = typ.Obj().Name() - default: - errorf("named base type expected") - } - anonymous = true - } else if alias { - // anonymous field: we have an explicit name because it's an alias - anonymous = true - } - - return types.NewField(pos, pkg, name, typ, anonymous), tag -} - -func (p *importer) methodList(parent *types.Package, baseType *types.Named) (methods []*types.Func) { - if n := p.int(); n > 0 { - methods = make([]*types.Func, n) - for i := range methods { - methods[i] = p.method(parent, baseType) - } - } - return -} - -func (p *importer) method(parent *types.Package, baseType *types.Named) *types.Func { - pos := p.pos() - pkg, name, _ := p.fieldName(parent) - // If we don't have a baseType, use a nil receiver. - // A receiver using the actual interface type (which - // we don't know yet) will be filled in when we call - // types.Interface.Complete. 
- var recv *types.Var - if baseType != nil { - recv = types.NewVar(token.NoPos, parent, "", baseType) - } - params, isddd := p.paramList() - result, _ := p.paramList() - sig := types.NewSignature(recv, params, result, isddd) - return types.NewFunc(pos, pkg, name, sig) -} - -func (p *importer) fieldName(parent *types.Package) (pkg *types.Package, name string, alias bool) { - name = p.string() - pkg = parent - if pkg == nil { - // use the imported package instead - pkg = p.pkgList[0] - } - if p.version == 0 && name == "_" { - // version 0 didn't export a package for _ fields - return - } - switch name { - case "": - // 1) field name matches base type name and is exported: nothing to do - case "?": - // 2) field name matches base type name and is not exported: need package - name = "" - pkg = p.pkg() - case "@": - // 3) field name doesn't match type name (alias) - name = p.string() - alias = true - fallthrough - default: - if !exported(name) { - pkg = p.pkg() - } - } - return -} - -func (p *importer) paramList() (*types.Tuple, bool) { - n := p.int() - if n == 0 { - return nil, false - } - // negative length indicates unnamed parameters - named := true - if n < 0 { - n = -n - named = false - } - // n > 0 - params := make([]*types.Var, n) - isddd := false - for i := range params { - params[i], isddd = p.param(named) - } - return types.NewTuple(params...), isddd -} - -func (p *importer) param(named bool) (*types.Var, bool) { - t := p.typ(nil, nil) - td, isddd := t.(*dddSlice) - if isddd { - t = types.NewSlice(td.elem) - } - - var pkg *types.Package - var name string - if named { - name = p.string() - if name == "" { - errorf("expected named parameter") - } - if name != "_" { - pkg = p.pkg() - } - if i := strings.Index(name, "·"); i > 0 { - name = name[:i] // cut off gc-specific parameter numbering - } - } - - // read and discard compiler-specific info - p.string() - - return types.NewVar(token.NoPos, pkg, name, t), isddd -} - -func exported(name string) bool { - ch, _ := utf8.DecodeRuneInString(name) - return unicode.IsUpper(ch) -} - -func (p *importer) value() constant.Value { - switch tag := p.tagOrIndex(); tag { - case falseTag: - return constant.MakeBool(false) - case trueTag: - return constant.MakeBool(true) - case int64Tag: - return constant.MakeInt64(p.int64()) - case floatTag: - return p.float() - case complexTag: - re := p.float() - im := p.float() - return constant.BinaryOp(re, token.ADD, constant.MakeImag(im)) - case stringTag: - return constant.MakeString(p.string()) - case unknownTag: - return constant.MakeUnknown() - default: - errorf("unexpected value tag %d", tag) // panics - panic("unreachable") - } -} - -func (p *importer) float() constant.Value { - sign := p.int() - if sign == 0 { - return constant.MakeInt64(0) - } - - exp := p.int() - mant := []byte(p.string()) // big endian - - // remove leading 0's if any - for len(mant) > 0 && mant[0] == 0 { - mant = mant[1:] - } - - // convert to little endian - // TODO(gri) go/constant should have a more direct conversion function - // (e.g., once it supports a big.Float based implementation) - for i, j := 0, len(mant)-1; i < j; i, j = i+1, j-1 { - mant[i], mant[j] = mant[j], mant[i] - } - - // adjust exponent (constant.MakeFromBytes creates an integer value, - // but mant represents the mantissa bits such that 0.5 <= mant < 1.0) - exp -= len(mant) << 3 - if len(mant) > 0 { - for msd := mant[len(mant)-1]; msd&0x80 == 0; msd <<= 1 { - exp++ - } - } - - x := constant.MakeFromBytes(mant) - switch { - case exp < 0: - d := 
constant.Shift(constant.MakeInt64(1), token.SHL, uint(-exp)) - x = constant.BinaryOp(x, token.QUO, d) - case exp > 0: - x = constant.Shift(x, token.SHL, uint(exp)) - } - - if sign < 0 { - x = constant.UnaryOp(token.SUB, x, 0) - } - return x -} - -// ---------------------------------------------------------------------------- -// Low-level decoders - -func (p *importer) tagOrIndex() int { - if p.debugFormat { - p.marker('t') - } - - return int(p.rawInt64()) -} - -func (p *importer) int() int { - x := p.int64() - if int64(int(x)) != x { - errorf("exported integer too large") - } - return int(x) -} - -func (p *importer) int64() int64 { - if p.debugFormat { - p.marker('i') - } - - return p.rawInt64() -} - -func (p *importer) path() string { - if p.debugFormat { - p.marker('p') - } - // if the path was seen before, i is its index (>= 0) - // (the empty string is at index 0) - i := p.rawInt64() - if i >= 0 { - return p.pathList[i] - } - // otherwise, i is the negative path length (< 0) - a := make([]string, -i) - for n := range a { - a[n] = p.string() - } - s := strings.Join(a, "/") - p.pathList = append(p.pathList, s) - return s -} - -func (p *importer) string() string { - if p.debugFormat { - p.marker('s') - } - // if the string was seen before, i is its index (>= 0) - // (the empty string is at index 0) - i := p.rawInt64() - if i >= 0 { - return p.strList[i] - } - // otherwise, i is the negative string length (< 0) - if n := int(-i); n <= cap(p.buf) { - p.buf = p.buf[:n] - } else { - p.buf = make([]byte, n) - } - for i := range p.buf { - p.buf[i] = p.rawByte() - } - s := string(p.buf) - p.strList = append(p.strList, s) - return s -} - -func (p *importer) marker(want byte) { - if got := p.rawByte(); got != want { - errorf("incorrect marker: got %c; want %c (pos = %d)", got, want, p.read) - } - - pos := p.read - if n := int(p.rawInt64()); n != pos { - errorf("incorrect position: got %d; want %d", n, pos) - } -} - -// rawInt64 should only be used by low-level decoders. -func (p *importer) rawInt64() int64 { - i, err := binary.ReadVarint(p) - if err != nil { - errorf("read error: %v", err) - } - return i -} - -// rawStringln should only be used to read the initial version string. -func (p *importer) rawStringln(b byte) string { - p.buf = p.buf[:0] - for b != '\n' { - p.buf = append(p.buf, b) - b = p.rawByte() - } - return string(p.buf) -} - -// needed for binary.ReadVarint in rawInt64 -func (p *importer) ReadByte() (byte, error) { - return p.rawByte(), nil -} - -// byte is the bottleneck interface for reading p.data. -// It unescapes '|' 'S' to '$' and '|' '|' to '|'. -// rawByte should only be used by low-level decoders. -func (p *importer) rawByte() byte { - b := p.data[0] - r := 1 - if b == '|' { - b = p.data[1] - r = 2 - switch b { - case 'S': - b = '$' - case '|': - // nothing to do - default: - errorf("unexpected escape sequence in export data") - } - } - p.data = p.data[r:] - p.read += r - return b - -} - -// ---------------------------------------------------------------------------- -// Export format - -// Tags. Must be < 0. 
-const ( - // Objects - packageTag = -(iota + 1) - constTag - typeTag - varTag - funcTag - endTag - - // Types - namedTag - arrayTag - sliceTag - dddTag - structTag - pointerTag - signatureTag - interfaceTag - mapTag - chanTag - - // Values - falseTag - trueTag - int64Tag - floatTag - fractionTag // not used by gc - complexTag - stringTag - nilTag // only used by gc (appears in exported inlined function bodies) - unknownTag // not used by gc (only appears in packages with errors) - - // Type aliases - aliasTag -) - var predeclOnce sync.Once var predecl []types.Type // initialized lazily diff --git a/vendor/golang.org/x/tools/internal/gcimporter/gcimporter.go b/vendor/golang.org/x/tools/internal/gcimporter/gcimporter.go index a973dece9..b1223713b 100644 --- a/vendor/golang.org/x/tools/internal/gcimporter/gcimporter.go +++ b/vendor/golang.org/x/tools/internal/gcimporter/gcimporter.go @@ -230,20 +230,17 @@ func Import(packages map[string]*types.Package, path, srcDir string, lookup func // Or, define a new standard go/types/gcexportdata package. fset := token.NewFileSet() - // The indexed export format starts with an 'i'; the older - // binary export format starts with a 'c', 'd', or 'v' - // (from "version"). Select appropriate importer. + // Select appropriate importer. if len(data) > 0 { switch data[0] { - case 'i': - _, pkg, err := IImportData(fset, packages, data[1:], id) - return pkg, err + case 'v', 'c', 'd': // binary, till go1.10 + return nil, fmt.Errorf("binary (%c) import format is no longer supported", data[0]) - case 'v', 'c', 'd': - _, pkg, err := BImportData(fset, packages, data, id) + case 'i': // indexed, till go1.19 + _, pkg, err := IImportData(fset, packages, data[1:], id) return pkg, err - case 'u': + case 'u': // unified, from go1.20 _, pkg, err := UImportData(fset, packages, data[1:size], id) return pkg, err diff --git a/vendor/golang.org/x/tools/internal/gcimporter/iexport.go b/vendor/golang.org/x/tools/internal/gcimporter/iexport.go index ba53cdcdd..9930d8c36 100644 --- a/vendor/golang.org/x/tools/internal/gcimporter/iexport.go +++ b/vendor/golang.org/x/tools/internal/gcimporter/iexport.go @@ -44,12 +44,12 @@ func IExportShallow(fset *token.FileSet, pkg *types.Package) ([]byte, error) { return out.Bytes(), err } -// IImportShallow decodes "shallow" types.Package data encoded by IExportShallow -// in the same executable. This function cannot import data from +// IImportShallow decodes "shallow" types.Package data encoded by +// IExportShallow in the same executable. This function cannot import data from // cmd/compile or gcexportdata.Write. -func IImportShallow(fset *token.FileSet, imports map[string]*types.Package, data []byte, path string, insert InsertType) (*types.Package, error) { +func IImportShallow(fset *token.FileSet, getPackage GetPackageFunc, data []byte, path string, insert InsertType) (*types.Package, error) { const bundle = false - pkgs, err := iimportCommon(fset, imports, data, bundle, path, insert) + pkgs, err := iimportCommon(fset, getPackage, data, bundle, path, insert) if err != nil { return nil, err } @@ -969,6 +969,16 @@ func constantToFloat(x constant.Value) *big.Float { return &f } +func valueToRat(x constant.Value) *big.Rat { + // Convert little-endian to big-endian. + // I can't believe this is necessary. 
+ bytes := constant.Bytes(x) + for i := 0; i < len(bytes)/2; i++ { + bytes[i], bytes[len(bytes)-1-i] = bytes[len(bytes)-1-i], bytes[i] + } + return new(big.Rat).SetInt(new(big.Int).SetBytes(bytes)) +} + // mpint exports a multi-precision integer. // // For unsigned types, small values are written out as a single @@ -1178,3 +1188,12 @@ func (q *objQueue) popHead() types.Object { q.head++ return obj } + +// internalError represents an error generated inside this package. +type internalError string + +func (e internalError) Error() string { return "gcimporter: " + string(e) } + +func internalErrorf(format string, args ...interface{}) error { + return internalError(fmt.Sprintf(format, args...)) +} diff --git a/vendor/golang.org/x/tools/internal/gcimporter/iimport.go b/vendor/golang.org/x/tools/internal/gcimporter/iimport.go index 448f903e8..94a5eba33 100644 --- a/vendor/golang.org/x/tools/internal/gcimporter/iimport.go +++ b/vendor/golang.org/x/tools/internal/gcimporter/iimport.go @@ -85,7 +85,7 @@ const ( // If the export data version is not recognized or the format is otherwise // compromised, an error is returned. func IImportData(fset *token.FileSet, imports map[string]*types.Package, data []byte, path string) (int, *types.Package, error) { - pkgs, err := iimportCommon(fset, imports, data, false, path, nil) + pkgs, err := iimportCommon(fset, GetPackageFromMap(imports), data, false, path, nil) if err != nil { return 0, nil, err } @@ -94,10 +94,33 @@ func IImportData(fset *token.FileSet, imports map[string]*types.Package, data [] // IImportBundle imports a set of packages from the serialized package bundle. func IImportBundle(fset *token.FileSet, imports map[string]*types.Package, data []byte) ([]*types.Package, error) { - return iimportCommon(fset, imports, data, true, "", nil) + return iimportCommon(fset, GetPackageFromMap(imports), data, true, "", nil) } -func iimportCommon(fset *token.FileSet, imports map[string]*types.Package, data []byte, bundle bool, path string, insert InsertType) (pkgs []*types.Package, err error) { +// A GetPackageFunc is a function that gets the package with the given path +// from the importer state, creating it (with the specified name) if necessary. +// It is an abstraction of the map historically used to memoize package creation. +// +// Two calls with the same path must return the same package. +// +// If the given getPackage func returns nil, the import will fail. +type GetPackageFunc = func(path, name string) *types.Package + +// GetPackageFromMap returns a GetPackageFunc that retrieves packages from the +// given map of package path -> package. +// +// The resulting func may mutate m: if a requested package is not found, a new +// package will be inserted into m. 
+func GetPackageFromMap(m map[string]*types.Package) GetPackageFunc { + return func(path, name string) *types.Package { + if _, ok := m[path]; !ok { + m[path] = types.NewPackage(path, name) + } + return m[path] + } +} + +func iimportCommon(fset *token.FileSet, getPackage GetPackageFunc, data []byte, bundle bool, path string, insert InsertType) (pkgs []*types.Package, err error) { const currentVersion = iexportVersionCurrent version := int64(-1) if !debug { @@ -108,7 +131,7 @@ func iimportCommon(fset *token.FileSet, imports map[string]*types.Package, data } else if version > currentVersion { err = fmt.Errorf("cannot import %q (%v), export data is newer version - update tool", path, e) } else { - err = fmt.Errorf("cannot import %q (%v), possibly version skew - reinstall package", path, e) + err = fmt.Errorf("internal error while importing %q (%v); please report an issue", path, e) } } }() @@ -117,11 +140,8 @@ func iimportCommon(fset *token.FileSet, imports map[string]*types.Package, data r := &intReader{bytes.NewReader(data), path} if bundle { - bundleVersion := r.uint64() - switch bundleVersion { - case bundleVersion: - default: - errorf("unknown bundle format version %d", bundleVersion) + if v := r.uint64(); v != bundleVersion { + errorf("unknown bundle format version %d", v) } } @@ -195,10 +215,9 @@ func iimportCommon(fset *token.FileSet, imports map[string]*types.Package, data if pkgPath == "" { pkgPath = path } - pkg := imports[pkgPath] + pkg := getPackage(pkgPath, pkgName) if pkg == nil { - pkg = types.NewPackage(pkgPath, pkgName) - imports[pkgPath] = pkg + errorf("internal error: getPackage returned nil package for %s", pkgPath) } else if pkg.Name() != pkgName { errorf("conflicting names %s and %s for package %q", pkg.Name(), pkgName, path) } diff --git a/vendor/golang.org/x/tools/internal/gcimporter/ureader_yes.go b/vendor/golang.org/x/tools/internal/gcimporter/ureader_yes.go index 34fc783f8..b977435f6 100644 --- a/vendor/golang.org/x/tools/internal/gcimporter/ureader_yes.go +++ b/vendor/golang.org/x/tools/internal/gcimporter/ureader_yes.go @@ -10,6 +10,7 @@ package gcimporter import ( + "fmt" "go/token" "go/types" "sort" @@ -63,6 +64,14 @@ type typeInfo struct { } func UImportData(fset *token.FileSet, imports map[string]*types.Package, data []byte, path string) (_ int, pkg *types.Package, err error) { + if !debug { + defer func() { + if x := recover(); x != nil { + err = fmt.Errorf("internal error in importing %q (%v); please report an issue", path, x) + } + }() + } + s := string(data) s = s[:strings.LastIndex(s, "\n$$\n")] input := pkgbits.NewPkgDecoder(path, s) diff --git a/vendor/golang.org/x/tools/internal/gocommand/invoke.go b/vendor/golang.org/x/tools/internal/gocommand/invoke.go index d50551693..8d9fc98d8 100644 --- a/vendor/golang.org/x/tools/internal/gocommand/invoke.go +++ b/vendor/golang.org/x/tools/internal/gocommand/invoke.go @@ -8,10 +8,12 @@ package gocommand import ( "bytes" "context" + "errors" "fmt" "io" "log" "os" + "reflect" "regexp" "runtime" "strconv" @@ -22,6 +24,9 @@ import ( exec "golang.org/x/sys/execabs" "golang.org/x/tools/internal/event" + "golang.org/x/tools/internal/event/keys" + "golang.org/x/tools/internal/event/label" + "golang.org/x/tools/internal/event/tag" ) // An Runner will run go command invocations and serialize @@ -51,9 +56,19 @@ func (runner *Runner) initialize() { // 1.14: go: updating go.mod: existing contents have changed since last read var modConcurrencyError = regexp.MustCompile(`go:.*go.mod.*contents have changed`) +// verb is an 
event label for the go command verb. +var verb = keys.NewString("verb", "go command verb") + +func invLabels(inv Invocation) []label.Label { + return []label.Label{verb.Of(inv.Verb), tag.Directory.Of(inv.WorkingDir)} +} + // Run is a convenience wrapper around RunRaw. // It returns only stdout and a "friendly" error. func (runner *Runner) Run(ctx context.Context, inv Invocation) (*bytes.Buffer, error) { + ctx, done := event.Start(ctx, "gocommand.Runner.Run", invLabels(inv)...) + defer done() + stdout, _, friendly, _ := runner.RunRaw(ctx, inv) return stdout, friendly } @@ -61,6 +76,9 @@ func (runner *Runner) Run(ctx context.Context, inv Invocation) (*bytes.Buffer, e // RunPiped runs the invocation serially, always waiting for any concurrent // invocations to complete first. func (runner *Runner) RunPiped(ctx context.Context, inv Invocation, stdout, stderr io.Writer) error { + ctx, done := event.Start(ctx, "gocommand.Runner.RunPiped", invLabels(inv)...) + defer done() + _, err := runner.runPiped(ctx, inv, stdout, stderr) return err } @@ -68,6 +86,8 @@ func (runner *Runner) RunPiped(ctx context.Context, inv Invocation, stdout, stde // RunRaw runs the invocation, serializing requests only if they fight over // go.mod changes. func (runner *Runner) RunRaw(ctx context.Context, inv Invocation) (*bytes.Buffer, *bytes.Buffer, error, error) { + ctx, done := event.Start(ctx, "gocommand.Runner.RunRaw", invLabels(inv)...) + defer done() // Make sure the runner is always initialized. runner.initialize() @@ -215,6 +235,18 @@ func (i *Invocation) run(ctx context.Context, stdout, stderr io.Writer) error { cmd := exec.Command("go", goArgs...) cmd.Stdout = stdout cmd.Stderr = stderr + + // cmd.WaitDelay was added only in go1.20 (see #50436). + if waitDelay := reflect.ValueOf(cmd).Elem().FieldByName("WaitDelay"); waitDelay.IsValid() { + // https://go.dev/issue/59541: don't wait forever copying stderr + // after the command has exited. + // After CL 484741 we copy stdout manually, so we we'll stop reading that as + // soon as ctx is done. However, we also don't want to wait around forever + // for stderr. Give a much-longer-than-reasonable delay and then assume that + // something has wedged in the kernel or runtime. + waitDelay.Set(reflect.ValueOf(30 * time.Second)) + } + // On darwin the cwd gets resolved to the real path, which breaks anything that // expects the working directory to keep the original path, including the // go command when dealing with modules. @@ -229,6 +261,7 @@ func (i *Invocation) run(ctx context.Context, stdout, stderr io.Writer) error { cmd.Env = append(cmd.Env, "PWD="+i.WorkingDir) cmd.Dir = i.WorkingDir } + defer func(start time.Time) { log("%s for %v", time.Since(start), cmdDebugStr(cmd)) }(time.Now()) return runCmdContext(ctx, cmd) @@ -242,10 +275,85 @@ var DebugHangingGoCommands = false // runCmdContext is like exec.CommandContext except it sends os.Interrupt // before os.Kill. -func runCmdContext(ctx context.Context, cmd *exec.Cmd) error { - if err := cmd.Start(); err != nil { +func runCmdContext(ctx context.Context, cmd *exec.Cmd) (err error) { + // If cmd.Stdout is not an *os.File, the exec package will create a pipe and + // copy it to the Writer in a goroutine until the process has finished and + // either the pipe reaches EOF or command's WaitDelay expires. + // + // However, the output from 'go list' can be quite large, and we don't want to + // keep reading (and allocating buffers) if we've already decided we don't + // care about the output. 
We don't want to wait for the process to finish, and + // we don't wait to wait for the WaitDelay to expire either. + // + // Instead, if cmd.Stdout requires a copying goroutine we explicitly replace + // it with a pipe (which is an *os.File), which we can close in order to stop + // copying output as soon as we realize we don't care about it. + var stdoutW *os.File + if cmd.Stdout != nil { + if _, ok := cmd.Stdout.(*os.File); !ok { + var stdoutR *os.File + stdoutR, stdoutW, err = os.Pipe() + if err != nil { + return err + } + prevStdout := cmd.Stdout + cmd.Stdout = stdoutW + + stdoutErr := make(chan error, 1) + go func() { + _, err := io.Copy(prevStdout, stdoutR) + if err != nil { + err = fmt.Errorf("copying stdout: %w", err) + } + stdoutErr <- err + }() + defer func() { + // We started a goroutine to copy a stdout pipe. + // Wait for it to finish, or terminate it if need be. + var err2 error + select { + case err2 = <-stdoutErr: + stdoutR.Close() + case <-ctx.Done(): + stdoutR.Close() + // Per https://pkg.go.dev/os#File.Close, the call to stdoutR.Close + // should cause the Read call in io.Copy to unblock and return + // immediately, but we still need to receive from stdoutErr to confirm + // that that has happened. + <-stdoutErr + err2 = ctx.Err() + } + if err == nil { + err = err2 + } + }() + + // Per https://pkg.go.dev/os/exec#Cmd, “If Stdout and Stderr are the + // same writer, and have a type that can be compared with ==, at most + // one goroutine at a time will call Write.” + // + // Since we're starting a goroutine that writes to cmd.Stdout, we must + // also update cmd.Stderr so that that still holds. + func() { + defer func() { recover() }() + if cmd.Stderr == prevStdout { + cmd.Stderr = cmd.Stdout + } + }() + } + } + + err = cmd.Start() + if stdoutW != nil { + // The child process has inherited the pipe file, + // so close the copy held in this process. + stdoutW.Close() + stdoutW = nil + } + if err != nil { return err } + resChan := make(chan error, 1) go func() { resChan <- cmd.Wait() @@ -253,11 +361,14 @@ func runCmdContext(ctx context.Context, cmd *exec.Cmd) error { // If we're interested in debugging hanging Go commands, stop waiting after a // minute and panic with interesting information. - if DebugHangingGoCommands { + debug := DebugHangingGoCommands + if debug { + timer := time.NewTimer(1 * time.Minute) + defer timer.Stop() select { case err := <-resChan: return err - case <-time.After(1 * time.Minute): + case <-timer.C: HandleHangingGoCommand(cmd.Process) case <-ctx.Done(): } @@ -270,30 +381,25 @@ func runCmdContext(ctx context.Context, cmd *exec.Cmd) error { } // Cancelled. Interrupt and see if it ends voluntarily. - cmd.Process.Signal(os.Interrupt) - select { - case err := <-resChan: - return err - case <-time.After(time.Second): + if err := cmd.Process.Signal(os.Interrupt); err == nil { + // (We used to wait only 1s but this proved + // fragile on loaded builder machines.) + timer := time.NewTimer(5 * time.Second) + defer timer.Stop() + select { + case err := <-resChan: + return err + case <-timer.C: + } } // Didn't shut down in response to interrupt. Kill it hard. // TODO(rfindley): per advice from bcmills@, it may be better to send SIGQUIT // on certain platforms, such as unix. - if err := cmd.Process.Kill(); err != nil && DebugHangingGoCommands { - // Don't panic here as this reliably fails on windows with EINVAL. 
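The replacement body of runCmdContext above leans on one property of os/exec: when Stdout is an *os.File, the package does not spawn its own copying goroutine, so the caller can stop draining output simply by closing its end of a pipe. Below is a minimal, self-contained sketch of that pattern with an arbitrary command and hypothetical variable names; nothing in it is taken from the vendored code.

package main

import (
	"bytes"
	"fmt"
	"io"
	"os"
	"os/exec"
)

func main() {
	r, w, err := os.Pipe()
	if err != nil {
		panic(err)
	}

	// Because w is an *os.File, os/exec hands it to the child directly
	// instead of creating its own pipe plus copying goroutine.
	cmd := exec.Command("go", "env", "GOOS")
	cmd.Stdout = w

	var buf bytes.Buffer
	copyDone := make(chan error, 1)
	go func() {
		_, err := io.Copy(&buf, r) // returns on EOF, or early if r is closed
		copyDone <- err
	}()

	if err := cmd.Start(); err != nil {
		panic(err)
	}
	w.Close() // drop the parent's write end; the child keeps its own duplicate

	waitErr := cmd.Wait()
	copyErr := <-copyDone // EOF arrives once the child's write end closes
	r.Close()

	fmt.Printf("stdout=%q wait=%v copy=%v\n", buf.String(), waitErr, copyErr)
	// To abandon the output early instead, close r before the copy finishes;
	// the blocked Read inside io.Copy then returns and the goroutine exits.
}

Closing the read end early is what lets the vendored code walk away from large 'go list' output without waiting for the child process or for WaitDelay to expire.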
+ if err := cmd.Process.Kill(); err != nil && !errors.Is(err, os.ErrProcessDone) && debug { log.Printf("error killing the Go command: %v", err) } - // See above: don't wait indefinitely if we're debugging hanging Go commands. - if DebugHangingGoCommands { - select { - case err := <-resChan: - return err - case <-time.After(10 * time.Second): // a shorter wait as resChan should return quickly following Kill - HandleHangingGoCommand(cmd.Process) - } - } return <-resChan } diff --git a/vendor/golang.org/x/tools/internal/gocommand/version.go b/vendor/golang.org/x/tools/internal/gocommand/version.go index 307a76d47..446c5846a 100644 --- a/vendor/golang.org/x/tools/internal/gocommand/version.go +++ b/vendor/golang.org/x/tools/internal/gocommand/version.go @@ -23,21 +23,11 @@ import ( func GoVersion(ctx context.Context, inv Invocation, r *Runner) (int, error) { inv.Verb = "list" inv.Args = []string{"-e", "-f", `{{context.ReleaseTags}}`, `--`, `unsafe`} - inv.Env = append(append([]string{}, inv.Env...), "GO111MODULE=off") - // Unset any unneeded flags, and remove them from BuildFlags, if they're - // present. - inv.ModFile = "" + inv.BuildFlags = nil // This is not a build command. inv.ModFlag = "" - var buildFlags []string - for _, flag := range inv.BuildFlags { - // Flags can be prefixed by one or two dashes. - f := strings.TrimPrefix(strings.TrimPrefix(flag, "-"), "-") - if strings.HasPrefix(f, "mod=") || strings.HasPrefix(f, "modfile=") { - continue - } - buildFlags = append(buildFlags, flag) - } - inv.BuildFlags = buildFlags + inv.ModFile = "" + inv.Env = append(inv.Env[:len(inv.Env):len(inv.Env)], "GO111MODULE=off") + stdoutBytes, err := r.Run(ctx, inv) if err != nil { return 0, err diff --git a/vendor/golang.org/x/tools/internal/tokeninternal/tokeninternal.go b/vendor/golang.org/x/tools/internal/tokeninternal/tokeninternal.go index a3fb2d4f2..7e638ec24 100644 --- a/vendor/golang.org/x/tools/internal/tokeninternal/tokeninternal.go +++ b/vendor/golang.org/x/tools/internal/tokeninternal/tokeninternal.go @@ -7,7 +7,9 @@ package tokeninternal import ( + "fmt" "go/token" + "sort" "sync" "unsafe" ) @@ -57,3 +59,93 @@ func GetLines(file *token.File) []int { panic("unexpected token.File size") } } + +// AddExistingFiles adds the specified files to the FileSet if they +// are not already present. It panics if any pair of files in the +// resulting FileSet would overlap. +func AddExistingFiles(fset *token.FileSet, files []*token.File) { + // Punch through the FileSet encapsulation. + type tokenFileSet struct { + // This type remained essentially consistent from go1.16 to go1.21. + mutex sync.RWMutex + base int + files []*token.File + _ *token.File // changed to atomic.Pointer[token.File] in go1.19 + } + + // If the size of token.FileSet changes, this will fail to compile. + const delta = int64(unsafe.Sizeof(tokenFileSet{})) - int64(unsafe.Sizeof(token.FileSet{})) + var _ [-delta * delta]int + + type uP = unsafe.Pointer + var ptr *tokenFileSet + *(*uP)(uP(&ptr)) = uP(fset) + ptr.mutex.Lock() + defer ptr.mutex.Unlock() + + // Merge and sort. + newFiles := append(ptr.files, files...) + sort.Slice(newFiles, func(i, j int) bool { + return newFiles[i].Base() < newFiles[j].Base() + }) + + // Reject overlapping files. + // Discard adjacent identical files. 
+ out := newFiles[:0] + for i, file := range newFiles { + if i > 0 { + prev := newFiles[i-1] + if file == prev { + continue + } + if prev.Base()+prev.Size()+1 > file.Base() { + panic(fmt.Sprintf("file %s (%d-%d) overlaps with file %s (%d-%d)", + prev.Name(), prev.Base(), prev.Base()+prev.Size(), + file.Name(), file.Base(), file.Base()+file.Size())) + } + } + out = append(out, file) + } + newFiles = out + + ptr.files = newFiles + + // Advance FileSet.Base(). + if len(newFiles) > 0 { + last := newFiles[len(newFiles)-1] + newBase := last.Base() + last.Size() + 1 + if ptr.base < newBase { + ptr.base = newBase + } + } +} + +// FileSetFor returns a new FileSet containing a sequence of new Files with +// the same base, size, and line as the input files, for use in APIs that +// require a FileSet. +// +// Precondition: the input files must be non-overlapping, and sorted in order +// of their Base. +func FileSetFor(files ...*token.File) *token.FileSet { + fset := token.NewFileSet() + for _, f := range files { + f2 := fset.AddFile(f.Name(), f.Base(), f.Size()) + lines := GetLines(f) + f2.SetLines(lines) + } + return fset +} + +// CloneFileSet creates a new FileSet holding all files in fset. It does not +// create copies of the token.Files in fset: they are added to the resulting +// FileSet unmodified. +func CloneFileSet(fset *token.FileSet) *token.FileSet { + var files []*token.File + fset.Iterate(func(f *token.File) bool { + files = append(files, f) + return true + }) + newFileSet := token.NewFileSet() + AddExistingFiles(newFileSet, files) + return newFileSet +} diff --git a/vendor/golang.org/x/tools/internal/typesinternal/types.go b/vendor/golang.org/x/tools/internal/typesinternal/types.go index 3c53fbc63..ce7d4351b 100644 --- a/vendor/golang.org/x/tools/internal/typesinternal/types.go +++ b/vendor/golang.org/x/tools/internal/typesinternal/types.go @@ -11,8 +11,6 @@ import ( "go/types" "reflect" "unsafe" - - "golang.org/x/tools/go/types/objectpath" ) func SetUsesCgo(conf *types.Config) bool { @@ -52,10 +50,3 @@ func ReadGo116ErrorData(err types.Error) (code ErrorCode, start, end token.Pos, } var SetGoVersion = func(conf *types.Config, version string) bool { return false } - -// NewObjectpathEncoder returns a function closure equivalent to -// objectpath.For but amortized for multiple (sequential) calls. -// It is a temporary workaround, pending the approval of proposal 58668. 
-// -//go:linkname NewObjectpathFunc golang.org/x/tools/go/types/objectpath.newEncoderFor -func NewObjectpathFunc() func(types.Object) (objectpath.Path, error) diff --git a/vendor/modules.txt b/vendor/modules.txt index af4207383..eeed0314e 100644 --- a/vendor/modules.txt +++ b/vendor/modules.txt @@ -1,4 +1,7 @@ -# github.com/Azure/go-ansiterm v0.0.0-20210617225240-d185dfc1b5a1 +# dario.cat/mergo v1.0.0 +## explicit; go 1.13 +dario.cat/mergo +# github.com/Azure/go-ansiterm v0.0.0-20230124172434-306776ec8161 ## explicit; go 1.16 github.com/Azure/go-ansiterm github.com/Azure/go-ansiterm/winterm @@ -16,7 +19,7 @@ github.com/Microsoft/go-winio/internal/stringbuffer github.com/Microsoft/go-winio/pkg/guid github.com/Microsoft/go-winio/tools/mkwinsyscall github.com/Microsoft/go-winio/vhd -# github.com/Microsoft/hcsshim v0.10.0-rc.7 +# github.com/Microsoft/hcsshim v0.10.0-rc.8 ## explicit; go 1.18 github.com/Microsoft/hcsshim github.com/Microsoft/hcsshim/computestorage @@ -66,7 +69,7 @@ github.com/container-orchestrated-devices/container-device-interface/specs-go # github.com/containerd/cgroups v1.1.0 ## explicit; go 1.17 github.com/containerd/cgroups/stats/v1 -# github.com/containerd/containerd v1.7.0 +# github.com/containerd/containerd v1.7.2 ## explicit; go 1.19 github.com/containerd/containerd/errdefs github.com/containerd/containerd/log @@ -78,7 +81,7 @@ github.com/containerd/stargz-snapshotter/estargz/errorutil # github.com/containers/buildah v1.30.0 ## explicit; go 1.18 github.com/containers/buildah/define -# github.com/containers/common v0.53.0 +# github.com/containers/common v0.55.2 ## explicit; go 1.18 github.com/containers/common/libimage github.com/containers/common/libimage/define @@ -104,7 +107,7 @@ github.com/containers/common/pkg/supplemented github.com/containers/common/pkg/timetype github.com/containers/common/pkg/util github.com/containers/common/version -# github.com/containers/image/v5 v5.25.0 +# github.com/containers/image/v5 v5.26.1 ## explicit; go 1.18 github.com/containers/image/v5/copy github.com/containers/image/v5/directory @@ -290,7 +293,7 @@ github.com/coreos/go-systemd/v22/dbus github.com/coreos/go-systemd/v22/internal/dlopen github.com/coreos/go-systemd/v22/journal github.com/coreos/go-systemd/v22/sdjournal -# github.com/cyberphone/json-canonicalization v0.0.0-20220623050100-57a0ce2678a7 +# github.com/cyberphone/json-canonicalization v0.0.0-20230514072755-504adb8a8af1 ## explicit github.com/cyberphone/json-canonicalization/go/src/webpki.org/jsoncanonicalizer # github.com/cyphar/filepath-securejoin v0.2.3 @@ -386,7 +389,7 @@ github.com/gdamore/tcell/v2/terminfo/x/xfce github.com/gdamore/tcell/v2/terminfo/x/xterm github.com/gdamore/tcell/v2/terminfo/x/xterm_kitty github.com/gdamore/tcell/v2/terminfo/x/xterm_termite -# github.com/go-logr/logr v1.2.3 +# github.com/go-logr/logr v1.2.4 ## explicit; go 1.16 github.com/go-logr/logr github.com/go-logr/logr/funcr @@ -421,7 +424,7 @@ github.com/go-openapi/spec # github.com/go-openapi/strfmt v0.21.7 ## explicit; go 1.19 github.com/go-openapi/strfmt -# github.com/go-openapi/swag v0.22.3 +# github.com/go-openapi/swag v0.22.4 ## explicit; go 1.18 github.com/go-openapi/swag # github.com/go-openapi/validate v0.22.1 @@ -456,14 +459,14 @@ github.com/google/go-cmp/cmp/internal/diff github.com/google/go-cmp/cmp/internal/flags github.com/google/go-cmp/cmp/internal/function github.com/google/go-cmp/cmp/internal/value -# github.com/google/go-containerregistry v0.14.0 +# github.com/google/go-containerregistry v0.15.2 ## explicit; go 1.18 
github.com/google/go-containerregistry/pkg/name # github.com/google/go-intervals v0.0.2 ## explicit; go 1.12 github.com/google/go-intervals/intervalset -# github.com/google/pprof v0.0.0-20210407192527-94a9f03dee38 -## explicit; go 1.14 +# github.com/google/pprof v0.0.0-20230323073829-e72429f035bd +## explicit; go 1.19 github.com/google/pprof/profile # github.com/google/uuid v1.3.0 ## explicit @@ -483,9 +486,6 @@ github.com/hashicorp/go-multierror # github.com/hinshun/vt10x v0.0.0-20220301184237-5011da428d02 ## explicit; go 1.14 github.com/hinshun/vt10x -# github.com/imdario/mergo v0.3.15 -## explicit; go 1.13 -github.com/imdario/mergo # github.com/inconshreveable/mousetrap v1.1.0 ## explicit; go 1.18 github.com/inconshreveable/mousetrap @@ -559,7 +559,7 @@ github.com/mitchellh/mapstructure # github.com/moby/sys/mountinfo v0.6.2 ## explicit; go 1.16 github.com/moby/sys/mountinfo -# github.com/moby/term v0.0.0-20221120202655-abb19827d345 +# github.com/moby/term v0.5.0 ## explicit; go 1.18 github.com/moby/term github.com/moby/term/windows @@ -582,7 +582,7 @@ github.com/nxadm/tail/winfile # github.com/oklog/ulid v1.3.1 ## explicit github.com/oklog/ulid -# github.com/onsi/ginkgo/v2 v2.9.2 +# github.com/onsi/ginkgo/v2 v2.11.0 ## explicit; go 1.18 github.com/onsi/ginkgo/v2 github.com/onsi/ginkgo/v2/config @@ -604,7 +604,7 @@ github.com/onsi/ginkgo/v2/internal/parallel_support github.com/onsi/ginkgo/v2/internal/testingtproxy github.com/onsi/ginkgo/v2/reporters github.com/onsi/ginkgo/v2/types -# github.com/onsi/gomega v1.27.6 +# github.com/onsi/gomega v1.27.8 ## explicit; go 1.18 github.com/onsi/gomega github.com/onsi/gomega/format @@ -619,8 +619,8 @@ github.com/onsi/gomega/types # github.com/opencontainers/go-digest v1.0.0 ## explicit; go 1.13 github.com/opencontainers/go-digest -# github.com/opencontainers/image-spec v1.1.0-rc2.0.20221005185240-3a7f492d3f1b -## explicit; go 1.17 +# github.com/opencontainers/image-spec v1.1.0-rc3 +## explicit; go 1.18 github.com/opencontainers/image-spec/specs-go github.com/opencontainers/image-spec/specs-go/v1 # github.com/opencontainers/runc v1.1.7 => github.com/opencontainers/runc v1.1.1-0.20220617142545-8b9452f75cbc @@ -675,14 +675,14 @@ github.com/rs/zerolog github.com/rs/zerolog/internal/cbor github.com/rs/zerolog/internal/json github.com/rs/zerolog/log -# github.com/sigstore/fulcio v1.2.0 +# github.com/sigstore/fulcio v1.3.1 ## explicit; go 1.20 github.com/sigstore/fulcio/pkg/certificate -# github.com/sigstore/rekor v1.2.0 +# github.com/sigstore/rekor v1.2.2-0.20230601122533-4c81ff246d12 ## explicit; go 1.19 github.com/sigstore/rekor/pkg/generated/models -# github.com/sigstore/sigstore v1.6.4 -## explicit; go 1.18 +# github.com/sigstore/sigstore v1.7.1 +## explicit; go 1.19 github.com/sigstore/sigstore/pkg/cryptoutils github.com/sigstore/sigstore/pkg/signature github.com/sigstore/sigstore/pkg/signature/options @@ -699,7 +699,7 @@ github.com/spf13/pflag # github.com/stefanberger/go-pkcs11uri v0.0.0-20201008174630-78d3cae3a980 ## explicit github.com/stefanberger/go-pkcs11uri -# github.com/sylabs/sif/v2 v2.11.1 +# github.com/sylabs/sif/v2 v2.11.5 ## explicit; go 1.19 github.com/sylabs/sif/v2/pkg/sif # github.com/syndtr/gocapability v0.0.0-20200815063812-42c35b437635 @@ -725,7 +725,7 @@ github.com/ulikunitz/xz/lzma github.com/vbatts/tar-split/archive/tar github.com/vbatts/tar-split/tar/asm github.com/vbatts/tar-split/tar/storage -# github.com/vbauerster/mpb/v8 v8.3.0 +# github.com/vbauerster/mpb/v8 v8.4.0 ## explicit; go 1.17 github.com/vbauerster/mpb/v8 
github.com/vbauerster/mpb/v8/cwriter @@ -779,15 +779,15 @@ golang.org/x/crypto/ssh golang.org/x/crypto/ssh/agent golang.org/x/crypto/ssh/internal/bcrypt_pbkdf golang.org/x/crypto/ssh/knownhosts -# golang.org/x/exp v0.0.0-20230321023759-10a507213a29 -## explicit; go 1.18 +# golang.org/x/exp v0.0.0-20230522175609-2e198f4a06a1 +## explicit; go 1.20 golang.org/x/exp/constraints golang.org/x/exp/maps golang.org/x/exp/slices # golang.org/x/mod v0.10.0 ## explicit; go 1.17 golang.org/x/mod/semver -# golang.org/x/net v0.10.0 +# golang.org/x/net v0.11.0 ## explicit; go 1.17 golang.org/x/net/context golang.org/x/net/html @@ -801,8 +801,8 @@ golang.org/x/net/internal/socks golang.org/x/net/internal/timeseries golang.org/x/net/proxy golang.org/x/net/trace -# golang.org/x/sync v0.2.0 -## explicit +# golang.org/x/sync v0.3.0 +## explicit; go 1.17 golang.org/x/sync/errgroup golang.org/x/sync/semaphore # golang.org/x/sys v0.9.0 @@ -838,18 +838,18 @@ golang.org/x/text/secure/bidirule golang.org/x/text/transform golang.org/x/text/unicode/bidi golang.org/x/text/unicode/norm -# golang.org/x/tools v0.7.0 +# golang.org/x/tools v0.9.3 ## explicit; go 1.18 golang.org/x/tools/cmd/stringer golang.org/x/tools/go/ast/inspector golang.org/x/tools/go/gcexportdata golang.org/x/tools/go/internal/packagesdriver golang.org/x/tools/go/packages -golang.org/x/tools/go/types/objectpath golang.org/x/tools/internal/event golang.org/x/tools/internal/event/core golang.org/x/tools/internal/event/keys golang.org/x/tools/internal/event/label +golang.org/x/tools/internal/event/tag golang.org/x/tools/internal/gcimporter golang.org/x/tools/internal/gocommand golang.org/x/tools/internal/packagesinternal