From 5a0065fc2982ae48de49985a2f736f97ae9cc4d4 Mon Sep 17 00:00:00 2001
From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com>
Date: Wed, 28 Jun 2023 22:55:43 +0000
Subject: [PATCH] Bump github.com/containers/common from 0.53.0 to 0.54.0

Bumps [github.com/containers/common](https://github.com/containers/common) from 0.53.0 to 0.54.0.
- [Release notes](https://github.com/containers/common/releases)
- [Commits](https://github.com/containers/common/compare/v0.53.0...v0.54.0)

---
updated-dependencies:
- dependency-name: github.com/containers/common
  dependency-type: direct:production
  update-type: version-update:semver-minor
...

Signed-off-by: dependabot[bot]
---
 go.mod | 62 +-
 go.sum | 124 +-
 .../mergo/.deepsource.toml | 2 +-
 .../imdario => dario.cat}/mergo/.gitignore | 0
 .../imdario => dario.cat}/mergo/.travis.yml | 0
 .../mergo/CODE_OF_CONDUCT.md | 0
 .../mergo/CONTRIBUTING.md | 0
 .../imdario => dario.cat}/mergo/LICENSE | 0
 .../imdario => dario.cat}/mergo/README.md | 42 +-
 .../imdario => dario.cat}/mergo/SECURITY.md | 0
 .../imdario => dario.cat}/mergo/doc.go | 43 +-
 .../imdario => dario.cat}/mergo/map.go | 0
 .../imdario => dario.cat}/mergo/merge.go | 0
 .../imdario => dario.cat}/mergo/mergo.go | 0
 .../github.com/Azure/go-ansiterm/SECURITY.md | 41 +
 .../Microsoft/go-winio/.golangci.yml | 27 +-
 .../github.com/Microsoft/go-winio/hvsock.go | 6 +-
 .../Microsoft/go-winio/internal/fs/doc.go | 2 +
 .../Microsoft/go-winio/internal/fs/fs.go | 202 +++
 .../go-winio/internal/fs/security.go | 12 +
 .../go-winio/internal/fs/zsyscall_windows.go | 64 +
 .../go-winio/internal/socket/socket.go | 4 +-
 .../go-winio/internal/stringbuffer/wstring.go | 132 ++
 vendor/github.com/Microsoft/go-winio/pipe.go | 22 +-
 .../tools/mkwinsyscall/mkwinsyscall.go | 85 +-
 .../Microsoft/go-winio/zsyscall_windows.go | 19 -
 vendor/github.com/Microsoft/hcsshim/Makefile | 18 +-
 .../Microsoft/hcsshim/internal/log/format.go | 85 ++
 .../Microsoft/hcsshim/internal/log/hook.go | 145 ++-
 .../Microsoft/hcsshim/internal/log/scrub.go | 22 +-
 .../Microsoft/hcsshim/internal/oc/errors.go | 69 +
 .../Microsoft/hcsshim/internal/oc/exporter.go | 77 +-
 .../Microsoft/hcsshim/internal/oc/span.go | 14 +-
 .../internal/security/grantvmgroupaccess.go | 14 +-
 .../containers/common/libimage/filters.go | 12 +-
 .../containers/common/libimage/history.go | 8 +-
 .../containers/common/libimage/image.go | 10 +-
 .../containers/common/libimage/import.go | 12 +-
 .../containers/common/libimage/layer_tree.go | 29 +
 .../common/libimage/manifest_list.go | 10 +-
 .../common/libimage/manifests/manifests.go | 2 +-
 .../containers/common/libimage/pull.go | 11 +-
 .../common/libnetwork/types/const.go | 12 +-
 .../common/libnetwork/types/define.go | 9 +-
 .../common/libnetwork/types/network.go | 33 +
 .../common/libnetwork/util/filters.go | 2 +-
 .../containers/common/libnetwork/util/ip.go | 22 +
 .../common/pkg/cgroups/cpuset_linux.go | 2 +-
 .../containers/common/pkg/config/config.go | 17 +
 .../common/pkg/config/config_darwin.go | 4 +-
 .../common/pkg/config/containers.conf | 42 +-
 .../common/pkg/config/containers.conf-freebsd | 7 +
 .../containers/common/pkg/config/default.go | 16 +-
 .../common/pkg/config/default_common.go | 7 +
 .../common/pkg/config/default_freebsd.go | 3 +
 .../common/pkg/manifests/manifests.go | 3 +-
 .../containers/common/pkg/retry/retry.go | 29 +-
 .../common/pkg/supplemented/supplemented.go | 2 +-
 .../containers/common/pkg/util/util.go | 167 ++-
 .../containers/common/version/version.go | 2 +-
 .../containers/image/v5/copy/blob.go | 4 +-
 .../containers/image/v5/copy/encryption.go | 92 +-
 .../containers/image/v5/copy/manifest.go | 44 +-
 .../containers/image/v5/copy/multiple.go | 85 +-
 .../containers/image/v5/copy/single.go | 8 +-
 .../image/v5/docker/daemon/client.go | 52 +-
 .../image/v5/docker/docker_client.go | 15 +-
 .../image/v5/internal/image/docker_schema2.go | 4 +-
 .../containers/image/v5/internal/image/oci.go | 6 +-
 .../internal/manifest/docker_schema2_list.go | 74 +-
 .../image/v5/internal/manifest/list.go | 35 +
 .../image/v5/internal/manifest/oci_index.go | 132 +-
 .../containers/image/v5/internal/set/set.go | 4 +-
 .../containers/image/v5/manifest/oci.go | 16 +-
 .../image/v5/oci/layout/oci_transport.go | 12 +-
 .../image/v5/openshift/openshift-copies.go | 4 +-
 .../image/v5/pkg/docker/config/config.go | 124 +-
 .../github.com/containers/image/v5/sif/src.go | 10 +-
 .../image/v5/tarball/tarball_src.go | 161 +--
 .../containers/image/v5/types/types.go | 4 +-
 .../containers/image/v5/version/version.go | 2 +-
 .../github.com/containers/storage/.cirrus.yml | 17 +-
 .../containers/storage/.golangci.yml | 64 +-
 vendor/github.com/containers/storage/Makefile | 47 +-
 vendor/github.com/containers/storage/VERSION | 2 +-
 vendor/github.com/containers/storage/check.go | 1153 +++++++++++++++++
 .../containers/storage/containers.go | 24 +-
 .../containers/storage/drivers/aufs/aufs.go | 25 +-
 .../containers/storage/drivers/btrfs/btrfs.go | 50 +-
 .../storage/drivers/chown_windows.go | 3 +-
 .../storage/drivers/copy/copy_linux.go | 1 +
 .../containers/storage/drivers/counter.go | 2 +-
 .../storage/drivers/devmapper/device_setup.go | 6 +-
 .../storage/drivers/devmapper/deviceset.go | 60 +-
 .../storage/drivers/devmapper/driver.go | 15 +-
 .../containers/storage/drivers/driver.go | 15 +-
 .../storage/drivers/driver_darwin.go | 10 +-
 .../storage/drivers/driver_freebsd.go | 5 +-
 .../storage/drivers/driver_linux.go | 8 +-
 .../storage/drivers/driver_solaris.go | 7 +-
 .../storage/drivers/driver_unsupported.go | 10 +-
 .../storage/drivers/driver_windows.go | 10 +-
 .../containers/storage/drivers/fsdiff.go | 11 +-
 .../storage/drivers/overlay/check.go | 47 +-
 .../storage/drivers/overlay/mount.go | 2 +-
 .../storage/drivers/overlay/overlay.go | 244 ++--
 .../storage/drivers/quota/projectquota.go | 28 +-
 .../drivers/quota/projectquota_unsupported.go | 3 +-
 .../containers/storage/drivers/template.go | 1 +
 .../containers/storage/drivers/vfs/driver.go | 47 +-
 .../storage/drivers/windows/windows.go | 9 +-
 .../containers/storage/drivers/zfs/zfs.go | 13 +-
 .../github.com/containers/storage/images.go | 16 +-
 .../github.com/containers/storage/layers.go | 225 ++--
 .../containers/storage/pkg/archive/archive.go | 32 +-
 .../storage/pkg/archive/archive_linux.go | 5 +-
 .../storage/pkg/archive/archive_unix.go | 2 +-
 .../storage/pkg/archive/archive_windows.go | 7 +-
 .../containers/storage/pkg/archive/changes.go | 14 +-
 .../storage/pkg/archive/changes_linux.go | 1 -
 .../storage/pkg/archive/changes_windows.go | 1 -
 .../containers/storage/pkg/archive/copy.go | 1 -
 .../containers/storage/pkg/archive/diff.go | 2 +-
 .../storage/pkg/archive/fflags_bsd.go | 2 +-
 .../storage/pkg/chrootarchive/archive.go | 2 +-
 .../pkg/chrootarchive/archive_darwin.go | 3 +-
 .../storage/pkg/chrootarchive/archive_unix.go | 4 +-
 .../pkg/chrootarchive/archive_windows.go | 3 +-
 .../storage/pkg/chrootarchive/diff_unix.go | 1 -
 .../storage/pkg/chunked/cache_linux.go | 40 +-
 .../storage/pkg/chunked/compression_linux.go | 123 +-
 .../pkg/chunked/compressor/compressor.go | 78 +-
 .../storage/pkg/chunked/compressor/rollsum.go | 12 +-
 .../pkg/chunked/internal/compression.go | 32 +-
 .../storage/pkg/chunked/storage_linux.go | 82 +-
 .../pkg/chunked/storage_unsupported.go | 9 +-
 .../containers/storage/pkg/config/config.go | 5 +
 .../storage/pkg/devicemapper/devmapper.go | 12 +-
 .../pkg/devicemapper/devmapper_wrapper.go | 15 +-
 .../storage/pkg/fileutils/fileutils.go | 7 +-
 .../storage/pkg/idmap/idmapped_utils.go | 4 +-
 .../storage/pkg/idtools/idtools_unix.go | 6 +-
 .../storage/pkg/idtools/usergroupadd_linux.go | 1 -
 .../storage/pkg/idtools/utils_unix.go | 4 +-
 .../storage/pkg/lockfile/lockfile_unix.go | 2 +-
 .../storage/pkg/lockfile/lockfile_windows.go | 1 +
 .../storage/pkg/loopback/attach_loopback.go | 7 +-
 .../pkg/parsers/kernel/kernel_windows.go | 1 -
 .../containers/storage/pkg/regexp/regexp.go | 96 +-
 .../storage/pkg/stringid/stringid.go | 2 +-
 .../containers/storage/pkg/system/errors.go | 6 +-
 .../storage/pkg/system/init_windows.go | 1 -
 .../storage/pkg/system/meminfo_solaris.go | 1 -
 .../containers/storage/pkg/system/path.go | 1 -
 .../containers/storage/pkg/system/rm.go | 6 +
 .../storage/pkg/system/stat_common.go | 3 +-
 .../storage/pkg/system/stat_darwin.go | 6 +-
 .../storage/pkg/system/stat_freebsd.go | 6 +-
 .../storage/pkg/system/stat_linux.go | 6 +-
 .../storage/pkg/system/stat_openbsd.go | 6 +-
 .../storage/pkg/system/stat_solaris.go | 6 +-
 .../storage/pkg/system/stat_windows.go | 3 +-
 .../storage/pkg/truncindex/truncindex.go | 4 +-
 .../containers/storage/storage.conf | 11 +-
 vendor/github.com/containers/storage/store.go | 1021 ++++++++-------
 .../containers/storage/types/errors.go | 37 +
 .../containers/storage/types/options.go | 51 +-
 .../storage/types/options_darwin.go | 9 +-
 .../storage/types/options_freebsd.go | 5 +
 .../containers/storage/types/options_linux.go | 38 +
 .../storage/types/options_windows.go | 5 +
 .../storage/types/storage_test.conf | 10 +
 .../containers/storage/types/utils.go | 11 +-
 vendor/github.com/go-logr/logr/.golangci.yaml | 3 -
 vendor/github.com/go-logr/logr/discard.go | 32 +-
 vendor/github.com/go-logr/logr/funcr/funcr.go | 27 +-
 vendor/github.com/go-logr/logr/logr.go | 166 ++-
 vendor/github.com/go-openapi/swag/util.go | 16 +-
 .../go-containerregistry/pkg/name/registry.go | 6 +
 .../github.com/google/pprof/profile/encode.go | 85 +-
 .../github.com/google/pprof/profile/filter.go | 4 +
 .../google/pprof/profile/legacy_profile.go | 31 +-
 .../github.com/google/pprof/profile/merge.go | 278 +++-
 .../google/pprof/profile/profile.go | 61 +-
 .../github.com/google/pprof/profile/proto.go | 19 +-
 .../github.com/google/pprof/profile/prune.go | 26 +-
 .../github.com/klauspost/compress/README.md | 13 +
 .../github.com/klauspost/compress/SECURITY.md | 25 +
 .../klauspost/compress/flate/deflate.go | 5 +-
 .../compress/flate/huffman_bit_writer.go | 5 -
 .../compress/flate/huffman_sortByFreq.go | 19 -
 .../klauspost/compress/huff0/bitwriter.go | 8 -
 .../klauspost/compress/huff0/decompress.go | 2 +-
 .../compress/internal/snapref/encode_other.go | 12 -
 .../klauspost/compress/zstd/README.md | 2 +-
 .../klauspost/compress/zstd/blockdec.go | 2 +-
 .../klauspost/compress/zstd/bytebuf.go | 2 +-
 .../compress/zstd/decoder_options.go | 2 +-
 .../klauspost/compress/zstd/enc_fast.go | 6 +-
 .../compress/zstd/encoder_options.go | 2 +-
 .../klauspost/compress/zstd/framedec.go | 8 +-
 .../klauspost/compress/zstd/matchlen_amd64.go | 16 +
 .../klauspost/compress/zstd/matchlen_amd64.s | 68 +
 .../compress/zstd/matchlen_generic.go | 33 +
 .../klauspost/compress/zstd/zstd.go | 22 -
 .../github.com/mistifyio/go-zfs/v3/utils.go | 9 +-
 .../mistifyio/go-zfs/v3/utils_notsolaris.go | 2 +-
 .../mistifyio/go-zfs/v3/utils_solaris.go | 2 +-
 .../github.com/mistifyio/go-zfs/v3/zpool.go | 3 -
 vendor/github.com/moby/term/doc.go | 3 +
 vendor/github.com/moby/term/tc.go | 20 -
 vendor/github.com/moby/term/term.go | 116 +-
 vendor/github.com/moby/term/term_unix.go | 98 ++
 vendor/github.com/moby/term/term_windows.go | 99 +-
 .../moby/term/{termios.go => termios_unix.go} | 13 +-
 .../github.com/moby/term/termios_windows.go | 37 +
 .../moby/term/windows/ansi_reader.go | 4 +-
 .../github.com/moby/term/windows/console.go | 7 +-
 vendor/github.com/moby/term/winsize.go | 21 -
 vendor/github.com/onsi/ginkgo/v2/.gitignore | 2 +-
 vendor/github.com/onsi/ginkgo/v2/CHANGELOG.md | 83 ++
 .../v2/ginkgo/generators/generate_command.go | 5 +
 .../ginkgo/generators/generate_templates.go | 6 +-
 .../v2/ginkgo/generators/generators_common.go | 12 +
 .../github.com/onsi/ginkgo/v2/ginkgo_t_dsl.go | 3 +
 .../onsi/ginkgo/v2/internal/focus.go | 71 +-
 .../interrupt_handler/interrupt_handler.go | 47 +-
 .../onsi/ginkgo/v2/internal/node.go | 9 +
 .../onsi/ginkgo/v2/internal/ordering.go | 51 +-
 .../v2/internal/output_interceptor_unix.go | 11 +
 .../onsi/ginkgo/v2/internal/suite.go | 8 +
 .../internal/testingtproxy/testing_t_proxy.go | 4 +
 .../onsi/ginkgo/v2/internal/writer.go | 2 +-
 .../onsi/ginkgo/v2/reporters/json_report.go | 13 +-
 .../onsi/ginkgo/v2/reporters/junit_report.go | 13 +-
 .../ginkgo/v2/reporters/teamcity_report.go | 4 +
 .../ginkgo/v2/types/deprecation_support.go | 2 +-
 .../onsi/ginkgo/v2/types/version.go | 2 +-
 vendor/github.com/onsi/gomega/.gitignore | 2 +-
 vendor/github.com/onsi/gomega/CHANGELOG.md | 24 +
 vendor/github.com/onsi/gomega/gomega_dsl.go | 2 +-
 .../gomega/matchers/have_exact_elements.go | 7 +-
 .../image-spec/specs-go/v1/annotations.go | 3 -
 .../image-spec/specs-go/v1/artifact.go | 34 -
 .../image-spec/specs-go/v1/config.go | 34 +-
 .../image-spec/specs-go/v1/manifest.go | 11 +
 .../image-spec/specs-go/v1/mediatype.go | 19 +-
 .../image-spec/specs-go/version.go | 2 +-
 .../runtime-spec/specs-go/config.go | 88 ++
 .../runtime-spec/specs-go/version.go | 2 +-
 .../fulcio/pkg/certificate/extensions.go | 34 +-
 .../sigstore/pkg/signature/payload/payload.go | 23 +-
 .../sylabs/sif/v2/pkg/sif/create.go | 54 +-
 vendor/github.com/vbauerster/mpb/v8/README.md | 4 +-
 .../vbauerster/mpb/v8/decor/counters.go | 188 +--
 .../github.com/vbauerster/mpb/v8/decor/eta.go | 3 +-
 .../vbauerster/mpb/v8/decor/on_abort.go | 3 +-
 .../vbauerster/mpb/v8/decor/on_complete.go | 3 +-
 .../vbauerster/mpb/v8/decor/percentage.go | 30 +-
 .../vbauerster/mpb/v8/decor/pool.go | 10 -
 .../vbauerster/mpb/v8/decor/size_type.go | 43 +-
 .../vbauerster/mpb/v8/decor/speed.go | 52 +-
 .../vbauerster/mpb/v8/heap_manager.go | 7 +-
 .../github.com/vbauerster/mpb/v8/progress.go | 4 +-
 vendor/golang.org/x/exp/slices/slices.go | 28 +-
 vendor/golang.org/x/exp/slices/sort.go | 10 +-
 vendor/golang.org/x/net/http2/server.go | 9 +-
 vendor/golang.org/x/net/http2/transport.go | 21 +-
 vendor/golang.org/x/net/http2/writesched.go | 3 +-
 .../x/net/http2/writesched_roundrobin.go | 119 ++
 vendor/golang.org/x/sync/errgroup/errgroup.go | 10 +-
 vendor/golang.org/x/sync/errgroup/go120.go | 14 +
 .../golang.org/x/sync/errgroup/pre_go120.go | 15 +
 .../x/tools/go/gcexportdata/gcexportdata.go | 11 +-
 .../golang.org/x/tools/go/packages/golist.go | 23 +-
 .../x/tools/go/packages/packages.go | 3 +
 .../x/tools/go/types/objectpath/objectpath.go | 762 -----------
 .../x/tools/internal/event/tag/tag.go | 59 +
 .../x/tools/internal/gcimporter/bexport.go | 852 ------------
 .../x/tools/internal/gcimporter/bimport.go | 907 +------------
 .../x/tools/internal/gcimporter/gcimporter.go | 15 +-
 .../x/tools/internal/gcimporter/iexport.go | 27 +-
 .../x/tools/internal/gcimporter/iimport.go | 43 +-
 .../tools/internal/gcimporter/ureader_yes.go | 9 +
 .../x/tools/internal/gocommand/invoke.go | 146 ++-
 .../x/tools/internal/gocommand/version.go | 18 +-
 .../internal/tokeninternal/tokeninternal.go | 92 ++
 .../x/tools/internal/typesinternal/types.go | 9 -
 vendor/modules.txt | 82 +-
 289 files changed, 7101 insertions(+), 5406 deletions(-)
 rename vendor/{github.com/imdario => dario.cat}/mergo/.deepsource.toml (72%)
 rename vendor/{github.com/imdario => dario.cat}/mergo/.gitignore (100%)
 rename vendor/{github.com/imdario => dario.cat}/mergo/.travis.yml (100%)
 rename vendor/{github.com/imdario => dario.cat}/mergo/CODE_OF_CONDUCT.md (100%)
 rename vendor/{github.com/imdario => dario.cat}/mergo/CONTRIBUTING.md (100%)
 rename vendor/{github.com/imdario => dario.cat}/mergo/LICENSE (100%)
 rename vendor/{github.com/imdario => dario.cat}/mergo/README.md (89%)
 rename vendor/{github.com/imdario => dario.cat}/mergo/SECURITY.md (100%)
 rename vendor/{github.com/imdario => dario.cat}/mergo/doc.go (88%)
 rename vendor/{github.com/imdario => dario.cat}/mergo/map.go (100%)
 rename vendor/{github.com/imdario => dario.cat}/mergo/merge.go (100%)
 rename vendor/{github.com/imdario => dario.cat}/mergo/mergo.go (100%)
 create mode 100644 vendor/github.com/Azure/go-ansiterm/SECURITY.md
 create mode 100644 vendor/github.com/Microsoft/go-winio/internal/fs/doc.go
 create mode 100644 vendor/github.com/Microsoft/go-winio/internal/fs/fs.go
 create mode 100644 vendor/github.com/Microsoft/go-winio/internal/fs/security.go
 create mode 100644 vendor/github.com/Microsoft/go-winio/internal/fs/zsyscall_windows.go
 create mode 100644 vendor/github.com/Microsoft/go-winio/internal/stringbuffer/wstring.go
 create mode 100644 vendor/github.com/Microsoft/hcsshim/internal/log/format.go
 create mode 100644 vendor/github.com/Microsoft/hcsshim/internal/oc/errors.go
 create mode 100644 vendor/github.com/containers/common/pkg/config/default_common.go
 create mode 100644 vendor/github.com/containers/storage/check.go
 create mode 100644 vendor/github.com/klauspost/compress/SECURITY.md
 create mode 100644 vendor/github.com/klauspost/compress/zstd/matchlen_amd64.go
 create mode 100644 vendor/github.com/klauspost/compress/zstd/matchlen_amd64.s
 create mode 100644 vendor/github.com/klauspost/compress/zstd/matchlen_generic.go
 create mode 100644 vendor/github.com/moby/term/doc.go
 delete mode 100644 vendor/github.com/moby/term/tc.go
 create mode 100644 vendor/github.com/moby/term/term_unix.go
 rename vendor/github.com/moby/term/{termios.go => termios_unix.go} (50%)
 create mode 100644 vendor/github.com/moby/term/termios_windows.go
 delete mode 100644 vendor/github.com/moby/term/winsize.go
 delete mode 100644 vendor/github.com/opencontainers/image-spec/specs-go/v1/artifact.go
 delete mode 100644 vendor/github.com/vbauerster/mpb/v8/decor/pool.go
 create mode 100644 vendor/golang.org/x/net/http2/writesched_roundrobin.go
 create mode 100644 vendor/golang.org/x/sync/errgroup/go120.go
 create mode 100644 vendor/golang.org/x/sync/errgroup/pre_go120.go
 delete mode 100644 vendor/golang.org/x/tools/go/types/objectpath/objectpath.go
 create mode 100644 vendor/golang.org/x/tools/internal/event/tag/tag.go
 delete mode 100644 vendor/golang.org/x/tools/internal/gcimporter/bexport.go
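[Editor's note: the diffs below are the generated result of a routine version bump. As a sketch, this kind of bump is typically reproduced locally with the standard Go toolchain; the exact invocation dependabot used is not recorded in the patch, so the following is an assumption, not a transcript:

    # Raise the direct dependency to the new minor version.
    go get github.com/containers/common@v0.54.0

    # Recompute go.mod/go.sum and refresh the vendored copies;
    # `go mod vendor` is what produces the large vendor/ diff below.
    go mod tidy
    go mod vendor

The indirect bumps in go.mod (containers/image, containers/storage, and the rest) presumably follow from containers/common v0.54.0's own module requirements.]
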
diff --git a/go.mod b/go.mod
index 9bcdc7edb..4bbcd5bcf 100644
--- a/go.mod
+++ b/go.mod
@@ -5,9 +5,9 @@ go 1.18
 require (
 	github.com/BurntSushi/toml v1.3.2
 	github.com/containers/buildah v1.30.0
-	github.com/containers/common v0.53.0
+	github.com/containers/common v0.54.0
 	github.com/containers/podman/v4 v4.5.1
-	github.com/containers/storage v1.46.1
+	github.com/containers/storage v1.47.0
 	github.com/docker/distribution v2.8.2+incompatible
 	github.com/docker/docker v24.0.2+incompatible
 	github.com/docker/go-units v0.5.0
@@ -15,8 +15,8 @@ require (
 	github.com/hashicorp/go-multierror v1.1.1
 	github.com/hinshun/vt10x v0.0.0-20220301184237-5011da428d02
 	github.com/navidys/tvxwidgets v0.3.0
-	github.com/onsi/ginkgo/v2 v2.9.2
-	github.com/onsi/gomega v1.27.6
+	github.com/onsi/ginkgo/v2 v2.11.0
+	github.com/onsi/gomega v1.27.8
 	github.com/pkg/errors v0.9.1
 	github.com/rivo/tview v0.0.0-20220307222120-9994674d60a8
 	github.com/rs/zerolog v1.29.1
@@ -26,9 +26,10 @@
 )

 require (
-	github.com/Azure/go-ansiterm v0.0.0-20210617225240-d185dfc1b5a1 // indirect
-	github.com/Microsoft/go-winio v0.6.0 // indirect
-	github.com/Microsoft/hcsshim v0.10.0-rc.7 // indirect
+	dario.cat/mergo v1.0.0 // indirect
+	github.com/Azure/go-ansiterm v0.0.0-20230124172434-306776ec8161 // indirect
+	github.com/Microsoft/go-winio v0.6.1 // indirect
+	github.com/Microsoft/hcsshim v0.10.0-rc.8 // indirect
 	github.com/VividCortex/ewma v1.2.0 // indirect
 	github.com/acarl005/stripansi v0.0.0-20180116102854-5a71ef0e047d // indirect
 	github.com/asaskevich/govalidator v0.0.0-20230301143203-a9d515a09cc2 // indirect
@@ -36,21 +37,21 @@ require (
 	github.com/chzyer/readline v1.5.1 // indirect
 	github.com/container-orchestrated-devices/container-device-interface v0.5.4 // indirect
 	github.com/containerd/cgroups v1.1.0 // indirect
-	github.com/containerd/containerd v1.7.0 // indirect
+	github.com/containerd/containerd v1.7.2 // indirect
 	github.com/containerd/stargz-snapshotter/estargz v0.14.3 // indirect
-	github.com/containers/image/v5 v5.25.0 // indirect
+	github.com/containers/image/v5 v5.26.0 // indirect
 	github.com/containers/libtrust v0.0.0-20230121012942-c1716e8a8d01 // indirect
 	github.com/containers/ocicrypt v1.1.7 // indirect
 	github.com/containers/psgo v1.8.0 // indirect
 	github.com/coreos/go-systemd/v22 v22.5.0 // indirect
-	github.com/cyberphone/json-canonicalization v0.0.0-20220623050100-57a0ce2678a7 // indirect
+	github.com/cyberphone/json-canonicalization v0.0.0-20230514072755-504adb8a8af1 // indirect
 	github.com/cyphar/filepath-securejoin v0.2.3 // indirect
 	github.com/disiqueira/gotree/v3 v3.0.2 // indirect
 	github.com/docker/docker-credential-helpers v0.7.0 // indirect
 	github.com/docker/go-connections v0.4.1-0.20210727194412-58542c764a11 // indirect
 	github.com/fsnotify/fsnotify v1.6.0 // indirect
 	github.com/gdamore/encoding v1.0.0 // indirect
-	github.com/go-logr/logr v1.2.3 // indirect
+	github.com/go-logr/logr v1.2.4 // indirect
 	github.com/go-openapi/analysis v0.21.4 // indirect
 	github.com/go-openapi/errors v0.20.3 // indirect
 	github.com/go-openapi/jsonpointer v0.19.5 // indirect
@@ -59,7 +60,7 @@ require (
 	github.com/go-openapi/runtime v0.26.0 // indirect
 	github.com/go-openapi/spec v0.20.9 // indirect
 	github.com/go-openapi/strfmt v0.21.7 // indirect
-	github.com/go-openapi/swag v0.22.3 // indirect
+	github.com/go-openapi/swag v0.22.4 // indirect
 	github.com/go-openapi/validate v0.22.1 // indirect
 	github.com/go-task/slim-sprig v0.0.0-20230315185526-52ccab3ef572 // indirect
 	github.com/godbus/dbus/v5 v5.1.1-0.20221029134443-4b691ce883d5 // indirect
@@ -67,20 +68,19 @@ require (
 	github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect
 	github.com/golang/protobuf v1.5.3 // indirect
 	github.com/google/go-cmp v0.5.9 // indirect
-	github.com/google/go-containerregistry v0.14.0 // indirect
+	github.com/google/go-containerregistry v0.15.2 // indirect
 	github.com/google/go-intervals v0.0.2 // indirect
-	github.com/google/pprof v0.0.0-20210407192527-94a9f03dee38 // indirect
+	github.com/google/pprof v0.0.0-20230323073829-e72429f035bd // indirect
 	github.com/google/uuid v1.3.0 // indirect
 	github.com/gorilla/mux v1.8.0 // indirect
 	github.com/gorilla/schema v1.2.0 // indirect
 	github.com/hashicorp/errwrap v1.1.0 // indirect
-	github.com/imdario/mergo v0.3.15 // indirect
 	github.com/inconshreveable/mousetrap v1.1.0 // indirect
 	github.com/jinzhu/copier v0.3.5 // indirect
 	github.com/josharian/intern v1.0.0 // indirect
 	github.com/json-iterator/go v1.1.12 // indirect
-	github.com/klauspost/compress v1.16.4 // indirect
-	github.com/klauspost/pgzip v1.2.6-0.20220930104621-17e8dac29df8 // indirect
+	github.com/klauspost/compress v1.16.6 // indirect
+	github.com/klauspost/pgzip v1.2.6 // indirect
 	github.com/kr/fs v0.1.0 // indirect
 	github.com/letsencrypt/boulder v0.0.0-20230213213521-fdfea0d469b6 // indirect
 	github.com/lucasb-eyer/go-colorful v1.2.0 // indirect
@@ -91,49 +91,49 @@ require (
 	github.com/mattn/go-runewidth v0.0.14 // indirect
 	github.com/mattn/go-shellwords v1.0.12 // indirect
 	github.com/miekg/pkcs11 v1.1.1 // indirect
-	github.com/mistifyio/go-zfs/v3 v3.0.0 // indirect
+	github.com/mistifyio/go-zfs/v3 v3.0.1 // indirect
 	github.com/mitchellh/mapstructure v1.5.0 // indirect
 	github.com/moby/sys/mountinfo v0.6.2 // indirect
-	github.com/moby/term v0.0.0-20221120202655-abb19827d345 // indirect
+	github.com/moby/term v0.5.0 // indirect
 	github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect
 	github.com/modern-go/reflect2 v1.0.2 // indirect
 	github.com/nxadm/tail v1.4.8 // indirect
 	github.com/oklog/ulid v1.3.1 // indirect
 	github.com/opencontainers/go-digest v1.0.0 // indirect
-	github.com/opencontainers/image-spec v1.1.0-rc2.0.20221005185240-3a7f492d3f1b // indirect
-	github.com/opencontainers/runc v1.1.5 // indirect
-	github.com/opencontainers/runtime-spec v1.1.0-rc.2 // indirect
+	github.com/opencontainers/image-spec v1.1.0-rc3 // indirect
+	github.com/opencontainers/runc v1.1.7 // indirect
+	github.com/opencontainers/runtime-spec v1.1.0-rc.3 // indirect
 	github.com/opencontainers/runtime-tools v0.9.1-0.20230317050512-e931285f4b69 // indirect
 	github.com/opencontainers/selinux v1.11.0 // indirect
 	github.com/ostreedev/ostree-go v0.0.0-20210805093236-719684c64e4f // indirect
 	github.com/pkg/sftp v1.13.5 // indirect
 	github.com/proglottis/gpgme v0.1.3 // indirect
 	github.com/rivo/uniseg v0.4.4 // indirect
-	github.com/sigstore/fulcio v1.2.0 // indirect
-	github.com/sigstore/rekor v1.2.0 // indirect
-	github.com/sigstore/sigstore v1.6.4 // indirect
+	github.com/sigstore/fulcio v1.3.1 // indirect
+	github.com/sigstore/rekor v1.2.2-0.20230601122533-4c81ff246d12 // indirect
+	github.com/sigstore/sigstore v1.7.1 // indirect
 	github.com/spf13/pflag v1.0.5 // indirect
 	github.com/stefanberger/go-pkcs11uri v0.0.0-20201008174630-78d3cae3a980 // indirect
-	github.com/sylabs/sif/v2 v2.11.1 // indirect
+	github.com/sylabs/sif/v2 v2.11.5 // indirect
 	github.com/syndtr/gocapability v0.0.0-20200815063812-42c35b437635 // indirect
 	github.com/tchap/go-patricia/v2 v2.3.1 // indirect
 	github.com/theupdateframework/go-tuf v0.5.2 // indirect
 	github.com/titanous/rocacheck v0.0.0-20171023193734-afe73141d399 // indirect
 	github.com/ulikunitz/xz v0.5.11 // indirect
 	github.com/vbatts/tar-split v0.11.3 // indirect
-	github.com/vbauerster/mpb/v8 v8.3.0 // indirect
+	github.com/vbauerster/mpb/v8 v8.4.0 // indirect
 	go.etcd.io/bbolt v1.3.7 // indirect
 	go.mongodb.org/mongo-driver v1.11.3 // indirect
 	go.mozilla.org/pkcs7 v0.0.0-20210826202110-33d05740a352 // indirect
 	go.opencensus.io v0.24.0 // indirect
-	golang.org/x/exp v0.0.0-20230321023759-10a507213a29 // indirect
+	golang.org/x/exp v0.0.0-20230522175609-2e198f4a06a1 // indirect
 	golang.org/x/mod v0.10.0 // indirect
-	golang.org/x/net v0.10.0 // indirect
-	golang.org/x/sync v0.2.0 // indirect
+	golang.org/x/net v0.11.0 // indirect
+	golang.org/x/sync v0.3.0 // indirect
 	golang.org/x/sys v0.9.0 // indirect
 	golang.org/x/term v0.9.0 // indirect
 	golang.org/x/text v0.10.0 // indirect
-	golang.org/x/tools v0.7.0 // indirect
+	golang.org/x/tools v0.9.3 // indirect
 	google.golang.org/genproto v0.0.0-20230410155749-daa745c078e1 // indirect
 	google.golang.org/grpc v1.55.0 // indirect
 	google.golang.org/protobuf v1.30.0 // indirect
diff --git a/go.sum b/go.sum
index 17472f4d4..33bb64080 100644
--- a/go.sum
+++ b/go.sum
@@ -22,12 +22,14 @@ cloud.google.com/go/pubsub v1.2.0/go.mod h1:jhfEVHT8odbXTkndysNHCcx0awwzvfOlguIA
 cloud.google.com/go/storage v1.0.0/go.mod h1:IhtSnM/ZTZV8YYJWCY8RULGVqBDmpoyjwiyrjsg+URw=
 cloud.google.com/go/storage v1.5.0/go.mod h1:tpKbwo567HUNpVclU5sGELwQWBDZ8gh0ZeosJ0Rtdos=
 cloud.google.com/go/storage v1.6.0/go.mod h1:N7U0C8pVQ/+NIKOBQyamJIeKQKkZ+mxpohlUTyfDhBk=
+dario.cat/mergo v1.0.0 h1:AGCNq9Evsj31mOgNPcLyXc+4PNABt905YmuqPYYpBWk=
+dario.cat/mergo v1.0.0/go.mod h1:uNxQE+84aUszobStD9th8a29P2fMDhsBdgRYvZOxGmk=
 dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU=
 github.com/14rcole/gopopulate v0.0.0-20180821133914-b175b219e774 h1:SCbEWT58NSt7d2mcFdvxC9uyrdcTfvBbPLThhkDmXzg=
 github.com/Azure/azure-sdk-for-go v16.2.1+incompatible/go.mod h1:9XXNKU+eRnpl9moKnB4QOLf1HestfXbmab5FXxiDBjc=
 github.com/Azure/go-ansiterm v0.0.0-20170929234023-d6e3b3328b78/go.mod h1:LmzpDX56iTiv29bbRTIsUNlaFfuhWRQBWjQdVyAevI8=
-github.com/Azure/go-ansiterm v0.0.0-20210617225240-d185dfc1b5a1 h1:UQHMgLO+TxOElx5B5HZ4hJQsoJ/PvUvKRhJHDQXO8P8=
-github.com/Azure/go-ansiterm v0.0.0-20210617225240-d185dfc1b5a1/go.mod h1:xomTg63KZ2rFqZQzSB4Vz2SUXa1BpHTVz9L5PTmPC4E=
+github.com/Azure/go-ansiterm v0.0.0-20230124172434-306776ec8161 h1:L/gRVlceqvL25UVaW/CKtUDjefjrs0SPonmDGUVOYP0=
+github.com/Azure/go-ansiterm v0.0.0-20230124172434-306776ec8161/go.mod h1:xomTg63KZ2rFqZQzSB4Vz2SUXa1BpHTVz9L5PTmPC4E=
 github.com/Azure/go-autorest v10.8.1+incompatible/go.mod h1:r+4oMnoxhatjLLJ6zxSWATqVooLgysK6ZNox3g/xq24=
 github.com/Azure/go-autorest v14.2.0+incompatible/go.mod h1:r+4oMnoxhatjLLJ6zxSWATqVooLgysK6ZNox3g/xq24=
 github.com/Azure/go-autorest/autorest v0.11.1/go.mod h1:JFgpikqFJ/MleTTxwepExTKnFUKKszPS8UavbQYUMuw=
@@ -53,8 +55,8 @@ github.com/Microsoft/go-winio v0.4.17-0.20210211115548-6eac466e5fa3/go.mod h1:JP
 github.com/Microsoft/go-winio v0.4.17-0.20210324224401-5516f17a5958/go.mod h1:JPGBdM1cNvN/6ISo+n8V5iA4v8pBzdOpzfwIujj1a84=
 github.com/Microsoft/go-winio v0.4.17/go.mod h1:JPGBdM1cNvN/6ISo+n8V5iA4v8pBzdOpzfwIujj1a84=
 github.com/Microsoft/go-winio v0.5.2/go.mod h1:WpS1mjBmmwHBEWmogvA2mj8546UReBk4v8QkMxJ6pZY=
-github.com/Microsoft/go-winio v0.6.0 h1:slsWYD/zyx7lCXoZVlvQrj0hPTM1HI4+v1sIda2yDvg=
-github.com/Microsoft/go-winio v0.6.0/go.mod h1:cTAf44im0RAYeL23bpB+fzCyDH2MJiz2BO69KH/soAE=
+github.com/Microsoft/go-winio v0.6.1 h1:9/kr64B9VUZrLm5YYwbGtUJnMgqWVOdUAXu6Migciow=
+github.com/Microsoft/go-winio v0.6.1/go.mod h1:LRdKpFKfdobln8UmuiYcKPot9D2v6svN5+sAH+4kjUM=
 github.com/Microsoft/hcsshim v0.8.6/go.mod h1:Op3hHsoHPAvb6lceZHDtd9OkTew38wNoXnJs8iY7rUg=
 github.com/Microsoft/hcsshim v0.8.7-0.20190325164909-8abdbb8205e4/go.mod h1:Op3hHsoHPAvb6lceZHDtd9OkTew38wNoXnJs8iY7rUg=
 github.com/Microsoft/hcsshim v0.8.7/go.mod h1:OHd7sQqRFrYd3RmSgbgji+ctCwkbq2wbEYNSzOYtcBQ=
@@ -64,8 +66,8 @@ github.com/Microsoft/hcsshim v0.8.15/go.mod h1:x38A4YbHbdxJtc0sF6oIz+RG0npwSCAvn
 github.com/Microsoft/hcsshim v0.8.16/go.mod h1:o5/SZqmR7x9JNKsW3pu+nqHm0MF8vbA+VxGOoXdC600=
 github.com/Microsoft/hcsshim v0.8.21/go.mod h1:+w2gRZ5ReXQhFOrvSQeNfhrYB/dg3oDwTOcER2fw4I4=
 github.com/Microsoft/hcsshim v0.9.4/go.mod h1:7pLA8lDk46WKDWlVsENo92gC0XFa8rbKfyFRBqxEbCc=
-github.com/Microsoft/hcsshim v0.10.0-rc.7 h1:HBytQPxcv8Oy4244zbQbe6hnOnx544eL5QPUqhJldz8=
-github.com/Microsoft/hcsshim v0.10.0-rc.7/go.mod h1:ILuwjA+kNW+MrN/w5un7n3mTqkwsFu4Bp05/okFUZlE=
+github.com/Microsoft/hcsshim v0.10.0-rc.8 h1:YSZVvlIIDD1UxQpJp0h+dnpLUw+TrY0cx8obKsp3bek=
+github.com/Microsoft/hcsshim v0.10.0-rc.8/go.mod h1:OEthFdQv/AD2RAdzR6Mm1N1KPCztGKDurW1Z8b8VGMM=
 github.com/Microsoft/hcsshim/test v0.0.0-20201218223536-d3e5debf77da/go.mod h1:5hlzMzRKMLyo42nCZ9oml8AdTlq/0cvIaBv6tK1RehU=
 github.com/Microsoft/hcsshim/test v0.0.0-20210227013316-43a75bb4edd3/go.mod h1:mw7qgWloBUl75W/gVH3cQszUg1+gUITj7D6NY7ywVnY=
 github.com/NYTimes/gziphandler v0.0.0-20170623195520-56545f4a5d46/go.mod h1:3wb06e3pkSAbeQ52E9H9iFoQsEEwGN64994WTCIhntQ=
@@ -173,8 +175,8 @@ github.com/containerd/containerd v1.5.0-beta.4/go.mod h1:GmdgZd2zA2GYIBZ0w09Zvgq
 github.com/containerd/containerd v1.5.0-rc.0/go.mod h1:V/IXoMqNGgBlabz3tHD2TWDoTJseu1FGOKuoA4nNb2s=
 github.com/containerd/containerd v1.5.1/go.mod h1:0DOxVqwDy2iZvrZp2JUx/E+hS0UNTVn7dJnIOwtYR4g=
 github.com/containerd/containerd v1.5.7/go.mod h1:gyvv6+ugqY25TiXxcZC3L5yOeYgEw0QMhscqVp1AR9c=
-github.com/containerd/containerd v1.7.0 h1:G/ZQr3gMZs6ZT0qPUZ15znx5QSdQdASW11nXTLTM2Pg=
-github.com/containerd/containerd v1.7.0/go.mod h1:QfR7Efgb/6X2BDpTPJRvPTYDE9rsF0FsXX9J8sIs/sc=
+github.com/containerd/containerd v1.7.2 h1:UF2gdONnxO8I6byZXDi5sXWiWvlW3D/sci7dTQimEJo=
+github.com/containerd/containerd v1.7.2/go.mod h1:afcz74+K10M/+cjGHIVQrCt3RAQhUSCAjJ9iMYhhkuI=
 github.com/containerd/continuity v0.0.0-20190426062206-aaeac12a7ffc/go.mod h1:GL3xCUCBDV3CZiTSEKksMWbLE66hEyuu9qyDOOqM47Y=
 github.com/containerd/continuity v0.0.0-20190815185530-f2a389ac0a02/go.mod h1:GL3xCUCBDV3CZiTSEKksMWbLE66hEyuu9qyDOOqM47Y=
 github.com/containerd/continuity v0.0.0-20191127005431-f65d91d395eb/go.mod h1:GL3xCUCBDV3CZiTSEKksMWbLE66hEyuu9qyDOOqM47Y=
@@ -228,10 +230,10 @@ github.com/containernetworking/plugins v0.8.6/go.mod h1:qnw5mN19D8fIwkqW7oHHYDHV
 github.com/containernetworking/plugins v0.9.1/go.mod h1:xP/idU2ldlzN6m4p5LmGiwRDjeJr6FLK6vuiUwoH7P8=
 github.com/containers/buildah v1.30.0 h1:mdp2COGKFFEZNEGP8VZ5ITuUFVNPFoH+iK2sSesNfTA=
 github.com/containers/buildah v1.30.0/go.mod h1:lyMLZIevpAa6zSzjRl7z4lFJMCMQLFjfo56YIefaB/U=
-github.com/containers/common v0.53.0 h1:Ax814cLeX5VXSnkKUdxz762g+27fJj1st4UvKoXmkKs=
-github.com/containers/common v0.53.0/go.mod h1:pABPxJwlTE8oYk9/2BW0e0mumkuhJHIPsABHTGRXN3w=
-github.com/containers/image/v5 v5.25.0 h1:TJ0unmalbU+scd0i3Txap2wjGsAnv06MSCwgn6bsizk=
-github.com/containers/image/v5 v5.25.0/go.mod h1:EKvys0WVlRFkDw26R8y52TuhV9Tfn0yq2luLX6W52Ls=
+github.com/containers/common v0.54.0 h1:jJ2QVuliTa/40QxyDe1ZS1U/7BsDea7qdBeZE0VPu3E=
+github.com/containers/common v0.54.0/go.mod h1:xbA3bUfth8p2xmqSg01oxHNDRJA71SAVUCqhyEISKic=
+github.com/containers/image/v5 v5.26.0 h1:P9H4+N/7fTTClnFthIWgJU+0LBkhGlW2tCWR+UNG0Vs=
+github.com/containers/image/v5 v5.26.0/go.mod h1:QSW67adLL/B4eYsFPG6TjH5Ye4LiLazPAGWk5oQnUdQ=
 github.com/containers/libtrust v0.0.0-20230121012942-c1716e8a8d01 h1:Qzk5C6cYglewc+UyGf6lc8Mj2UaPTHy/iF2De0/77CA=
 github.com/containers/libtrust v0.0.0-20230121012942-c1716e8a8d01/go.mod h1:9rfv8iPl1ZP7aqh9YA68wnZv2NUDbXdcdPHVz0pFbPY=
 github.com/containers/ocicrypt v1.0.1/go.mod h1:MeJDzk1RJHv89LjsH0Sp5KTY3ZYkjXO/C+bKAeWFIrc=
@@ -244,8 +246,8 @@ github.com/containers/podman/v4 v4.5.1/go.mod h1:BoNmT1QNzMtDMUCiJ1j1ZoDx6OOn5BA
 github.com/containers/psgo v1.8.0 h1:2loGekmGAxM9ir5OsXWEfGwFxorMPYnc6gEDsGFQvhY=
 github.com/containers/psgo v1.8.0/go.mod h1:T8ZxnX3Ur4RvnhxFJ7t8xJ1F48RhiZB4rSrOaR/qGHc=
 github.com/containers/storage v1.43.0/go.mod h1:uZ147thiIFGdVTjMmIw19knttQnUCl3y9zjreHrg11s=
-github.com/containers/storage v1.46.1 h1:GcAe8J0Y6T2CF70fXPojUpnme6zXamuzGCrNujVtIGE=
-github.com/containers/storage v1.46.1/go.mod h1:81vNDX4h+nXJ2o0D6Yqy6JGXDYJGVpHZpz0nr09iJuQ=
+github.com/containers/storage v1.47.0 h1:Tl/onL8yE/4QABc2kfPDaTSYijk3QrmXGrO21KXkj58=
+github.com/containers/storage v1.47.0/go.mod h1:pRp3lkRo2qodb/ltpnudoXggrviRmaCmU5a5GhTBae0=
 github.com/coreos/bbolt v1.3.2/go.mod h1:iRUV2dpdMOn7Bo10OQBFzIJO9kkE559Wcmn+qkEiiKk=
 github.com/coreos/etcd v3.3.10+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE=
 github.com/coreos/go-iptables v0.4.5/go.mod h1:/mVI274lEDI2ns62jHCDnCyBF9Iwsmekav8Dbxlm1MU=
@@ -268,9 +270,9 @@ github.com/cpuguy83/go-md2man/v2 v2.0.0/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsr
 github.com/cpuguy83/go-md2man/v2 v2.0.2/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o=
 github.com/creack/pty v1.1.7/go.mod h1:lj5s0c3V2DBrqTV7llrYr5NG6My20zk30Fl46Y7DoTY=
 github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E=
-github.com/creack/pty v1.1.11 h1:07n33Z8lZxZ2qwegKbObQohDhXDQxiMMz1NOUGYlesw=
-github.com/cyberphone/json-canonicalization v0.0.0-20220623050100-57a0ce2678a7 h1:vU+EP9ZuFUCYE0NYLwTSob+3LNEJATzNfP/DC7SWGWI=
-github.com/cyberphone/json-canonicalization v0.0.0-20220623050100-57a0ce2678a7/go.mod h1:uzvlm1mxhHkdfqitSA92i7Se+S9ksOn3a3qmv/kyOCw=
+github.com/creack/pty v1.1.18 h1:n56/Zwd5o6whRC5PMGretI4IdRLlmBXYNjScPaBgsbY=
+github.com/cyberphone/json-canonicalization v0.0.0-20230514072755-504adb8a8af1 h1:8Pq5UNTC+/UfvcOPKQGZoKCkeF+ZaKa4wJ9OS2gsQQM=
+github.com/cyberphone/json-canonicalization v0.0.0-20230514072755-504adb8a8af1/go.mod h1:uzvlm1mxhHkdfqitSA92i7Se+S9ksOn3a3qmv/kyOCw=
 github.com/cyphar/filepath-securejoin v0.2.3 h1:YX6ebbZCZP7VkM3scTTokDgBL2TY741X51MTk3ycuNI=
 github.com/cyphar/filepath-securejoin v0.2.3/go.mod h1:aPGpWjXOXUn2NCNjFvBE6aRxGGx79pTxQpKOJNYHHl4=
 github.com/d2g/dhcp4 v0.0.0-20170904100407-a1d1b6c41b1c/go.mod h1:Ct2BUK8SB0YC1SMSibvLzxjeJLnrYEVLULFNiHY9YfQ=
@@ -354,8 +356,8 @@ github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9
 github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk=
 github.com/go-logr/logr v0.1.0/go.mod h1:ixOQHD9gLJUVQQ2ZOR7zLEifBX6tGkNJF4QyIY7sIas=
 github.com/go-logr/logr v0.2.0/go.mod h1:z6/tIYblkpsD+a4lm/fGIIU9mZ+XfAiaFtq7xTgseGU=
-github.com/go-logr/logr v1.2.3 h1:2DntVwHkVopvECVRSlL5PSo9eG+cAkDCuckLubN+rq0=
-github.com/go-logr/logr v1.2.3/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A=
+github.com/go-logr/logr v1.2.4 h1:g01GSCwiDw2xSZfjJ2/T9M+S6pFdcNtFYsp+Y43HYDQ=
+github.com/go-logr/logr v1.2.4/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A=
 github.com/go-openapi/analysis v0.21.2/go.mod h1:HZwRk4RRisyG8vx2Oe6aqeSQcoxRp47Xkp3+K6q+LdY=
 github.com/go-openapi/analysis v0.21.4 h1:ZDFLvSNxpDaomuCueM0BlSXxpANBlFYiBvr+GXrvIHc=
 github.com/go-openapi/analysis v0.21.4/go.mod h1:4zQ35W4neeZTqh3ol0rv/O8JBbka9QyAgQRPp9y3pfo=
@@ -396,8 +398,8 @@ github.com/go-openapi/swag v0.19.2/go.mod h1:POnQmlKehdgb5mhVOsnJFsivZCEZ/vjK9gh
 github.com/go-openapi/swag v0.19.5/go.mod h1:POnQmlKehdgb5mhVOsnJFsivZCEZ/vjK9gh66Z9tfKk=
 github.com/go-openapi/swag v0.19.15/go.mod h1:QYRuS/SOXUCsnplDa677K7+DxSOj6IPNl/eQntq43wQ=
 github.com/go-openapi/swag v0.21.1/go.mod h1:QYRuS/SOXUCsnplDa677K7+DxSOj6IPNl/eQntq43wQ=
-github.com/go-openapi/swag v0.22.3 h1:yMBqmnQ0gyZvEb/+KzuWZOXgllrXT4SADYbvDaXHv/g=
-github.com/go-openapi/swag v0.22.3/go.mod h1:UzaqsxGiab7freDnrUUra0MwWfN/q7tE4j+VcZ0yl14=
+github.com/go-openapi/swag v0.22.4 h1:QLMzNJnMGPRNDCbySlcj1x01tzU8/9LTTL9hZZZogBU=
+github.com/go-openapi/swag v0.22.4/go.mod h1:UzaqsxGiab7freDnrUUra0MwWfN/q7tE4j+VcZ0yl14=
 github.com/go-openapi/validate v0.22.1 h1:G+c2ub6q47kfX1sOBLwIQwzBVt8qmOAARyo/9Fqs9NU=
 github.com/go-openapi/validate v0.22.1/go.mod h1:rjnrwK57VJ7A8xqfpAOEKRH8yQSGUriMu5/zuPSQ1hg=
 github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY=
@@ -494,8 +496,8 @@ github.com/google/go-cmp v0.5.6/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/
 github.com/google/go-cmp v0.5.9 h1:O2Tfq5qg4qc4AmwVlvv0oLiVAGB7enBSJ2x2DqQFi38=
 github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
 github.com/google/go-containerregistry v0.5.1/go.mod h1:Ct15B4yir3PLOP5jsy0GNeYVaIZs/MK/Jz5any1wFW0=
-github.com/google/go-containerregistry v0.14.0 h1:z58vMqHxuwvAsVwvKEkmVBz2TlgBgH5k6koEXBtlYkw=
-github.com/google/go-containerregistry v0.14.0/go.mod h1:aiJ2fp/SXvkWgmYHioXnbMdlgB8eXiiYOY55gfN91Wk=
+github.com/google/go-containerregistry v0.15.2 h1:MMkSh+tjSdnmJZO7ljvEqV1DjfekB6VUEAZgy3a+TQE=
+github.com/google/go-containerregistry v0.15.2/go.mod h1:wWK+LnOv4jXMM23IT/F1wdYftGWGr47Is8CG+pmHK1Q=
 github.com/google/go-intervals v0.0.2 h1:FGrVEiUnTRKR8yE04qzXYaJMtnIYqobR5QbblK3ixcM=
 github.com/google/go-intervals v0.0.2/go.mod h1:MkaR3LNRfeKLPmqgJYs4E66z5InYjmCjbbr4TQlcT6Y=
 github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
@@ -507,8 +509,8 @@ github.com/google/pprof v0.0.0-20191218002539-d4f498aebedc/go.mod h1:ZgVRPoUq/hf
 github.com/google/pprof v0.0.0-20200212024743-f11f1df84d12/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM=
 github.com/google/pprof v0.0.0-20200229191704-1ebb73c60ed3/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM=
 github.com/google/pprof v0.0.0-20200430221834-fc25d7d30c6d/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM=
-github.com/google/pprof v0.0.0-20210407192527-94a9f03dee38 h1:yAJXTCF9TqKcTiHJAE8dj7HMvPfh66eeA2JYW7eFpSE=
-github.com/google/pprof v0.0.0-20210407192527-94a9f03dee38/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
+github.com/google/pprof v0.0.0-20230323073829-e72429f035bd h1:r8yyd+DJDmsUhGrRBxH5Pj7KeFK5l+Y3FsgT8keqKtk=
+github.com/google/pprof v0.0.0-20230323073829-e72429f035bd/go.mod h1:79YE0hCXdHag9sBkw2o+N/YnZtTkXi0UT9Nnixa5eYk=
 github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI=
 github.com/google/uuid v1.0.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
 github.com/google/uuid v1.1.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
@@ -554,14 +556,11 @@ github.com/honeycombio/beeline-go v1.10.0 h1:cUDe555oqvw8oD76BQJ8alk7FP0JZ/M/zXp
 github.com/honeycombio/libhoney-go v1.16.0 h1:kPpqoz6vbOzgp7jC6SR7SkNj7rua7rgxvznI6M3KdHc=
 github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU=
 github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc=
-github.com/ianlancetaylor/demangle v0.0.0-20200824232613-28f6c0f3b639/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc=
 github.com/imdario/mergo v0.3.5/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA=
 github.com/imdario/mergo v0.3.8/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA=
 github.com/imdario/mergo v0.3.10/go.mod h1:jmQim1M+e3UYxmgPu/WyfjB3N3VflVyUjjjwH0dnCYA=
 github.com/imdario/mergo v0.3.11/go.mod h1:jmQim1M+e3UYxmgPu/WyfjB3N3VflVyUjjjwH0dnCYA=
 github.com/imdario/mergo v0.3.12/go.mod h1:jmQim1M+e3UYxmgPu/WyfjB3N3VflVyUjjjwH0dnCYA=
-github.com/imdario/mergo v0.3.15 h1:M8XP7IuFNsqUx6VPK2P9OSmsYsI/YFaGil0uD21V3dM=
-github.com/imdario/mergo v0.3.15/go.mod h1:WBLT9ZmE3lPoWsEzCh9LPo3TiwVN+ZKEjmz+hD27ysY=
 github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8=
 github.com/inconshreveable/mousetrap v1.1.0 h1:wN+x4NVGpMsO7ErUn/mUI3vEoE6Jt13X2s0bqwp9tc8=
 github.com/inconshreveable/mousetrap v1.1.0/go.mod h1:vpF70FUmC8bwa3OWnCshd2FqLfsEA9PFc4w1p2J65bw=
@@ -596,11 +595,11 @@ github.com/klauspost/compress v1.11.13/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdY
 github.com/klauspost/compress v1.13.6/go.mod h1:/3/Vjq9QcHkK5uEr5lBEmyoZ1iFhe47etQ6QUkpK6sk=
 github.com/klauspost/compress v1.15.7/go.mod h1:PhcZ0MbTNciWF3rruxRgKxI5NkcHHrHUDtV4Yw2GlzU=
 github.com/klauspost/compress v1.15.9/go.mod h1:PhcZ0MbTNciWF3rruxRgKxI5NkcHHrHUDtV4Yw2GlzU=
-github.com/klauspost/compress v1.16.4 h1:91KN02FnsOYhuunwU4ssRe8lc2JosWmizWa91B5v1PU=
-github.com/klauspost/compress v1.16.4/go.mod h1:ntbaceVETuRiXiv4DpjP66DpAtAGkEQskQzEyD//IeE=
+github.com/klauspost/compress v1.16.6 h1:91SKEy4K37vkp255cJ8QesJhjyRO0hn9i9G0GoUwLsk=
+github.com/klauspost/compress v1.16.6/go.mod h1:ntbaceVETuRiXiv4DpjP66DpAtAGkEQskQzEyD//IeE=
 github.com/klauspost/pgzip v1.2.5/go.mod h1:Ch1tH69qFZu15pkjo5kYi6mth2Zzwzt50oCQKQE9RUs=
-github.com/klauspost/pgzip v1.2.6-0.20220930104621-17e8dac29df8 h1:BcxbplxjtczA1a6d3wYoa7a0WL3rq9DKBMGHeKyjEF0=
-github.com/klauspost/pgzip v1.2.6-0.20220930104621-17e8dac29df8/go.mod h1:Ch1tH69qFZu15pkjo5kYi6mth2Zzwzt50oCQKQE9RUs=
+github.com/klauspost/pgzip v1.2.6 h1:8RXeL5crjEUFnR2/Sn6GJNWtSQ3Dk8pq4CL3jvdDyjU=
+github.com/klauspost/pgzip v1.2.6/go.mod h1:Ch1tH69qFZu15pkjo5kYi6mth2Zzwzt50oCQKQE9RUs=
 github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
 github.com/konsorten/go-windows-terminal-sequences v1.0.2/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
 github.com/konsorten/go-windows-terminal-sequences v1.0.3/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
@@ -659,8 +658,9 @@ github.com/miekg/pkcs11 v1.0.3/go.mod h1:XsNlhZGX73bx86s2hdc/FuaLm2CPZJemRLMA+WT
 github.com/miekg/pkcs11 v1.1.1 h1:Ugu9pdy6vAYku5DEpVWVFPYnzV+bxB+iRdbuFSu7TvU=
 github.com/miekg/pkcs11 v1.1.1/go.mod h1:XsNlhZGX73bx86s2hdc/FuaLm2CPZJemRLMA+WTFxgs=
 github.com/mistifyio/go-zfs v2.1.2-0.20190413222219-f784269be439+incompatible/go.mod h1:8AuVvqP/mXw1px98n46wfvcGfQ4ci2FwoAjKYxuo3Z4=
-github.com/mistifyio/go-zfs/v3 v3.0.0 h1:J5QK618xRcXnQYZ2GE5FdmpS1ufIrWue+lR/mpe6/14=
 github.com/mistifyio/go-zfs/v3 v3.0.0/go.mod h1:CzVgeB0RvF2EGzQnytKVvVSDwmKJXxkOTUGbNrTja/k=
+github.com/mistifyio/go-zfs/v3 v3.0.1 h1:YaoXgBePoMA12+S1u/ddkv+QqxcfiZK4prI6HPnkFiU=
+github.com/mistifyio/go-zfs/v3 v3.0.1/go.mod h1:CzVgeB0RvF2EGzQnytKVvVSDwmKJXxkOTUGbNrTja/k=
 github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0=
 github.com/mitchellh/mapstructure v1.1.2/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y=
 github.com/mitchellh/mapstructure v1.3.3/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo=
@@ -675,8 +675,8 @@ github.com/moby/sys/mountinfo v0.6.2 h1:BzJjoreD5BMFNmD9Rus6gdd1pLuecOFPt8wC+Vyg
 github.com/moby/sys/mountinfo v0.6.2/go.mod h1:IJb6JQeOklcdMU9F5xQ8ZALD+CUr5VlGpwtX+VE0rpI=
 github.com/moby/sys/symlink v0.1.0/go.mod h1:GGDODQmbFOjFsXvfLVn3+ZRxkch54RkSiGqsZeMYowQ=
 github.com/moby/term v0.0.0-20200312100748-672ec06f55cd/go.mod h1:DdlQx2hp0Ss5/fLikoLlEeIYiATotOjgB//nb973jeo=
-github.com/moby/term v0.0.0-20221120202655-abb19827d345 h1:J9c53/kxIH+2nTKBEfZYFMlhghtHpIHSXpm5VRGHSnU=
-github.com/moby/term v0.0.0-20221120202655-abb19827d345/go.mod h1:15ce4BGCFxt7I5NQKT+HV0yEDxmf6fSysfEDiVo3zFM=
+github.com/moby/term v0.5.0 h1:xt8Q1nalod/v7BqbG21f8mQPqH+xAaC9C3N3wfWbVP0=
+github.com/moby/term v0.5.0/go.mod h1:8FzsFHVUBGZdbDsJw/ot+X+d5HLUbvklYLJ9uGfcI3Y=
 github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
 github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg=
 github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
@@ -711,8 +711,8 @@ github.com/onsi/ginkgo v1.10.3/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+
 github.com/onsi/ginkgo v1.11.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
 github.com/onsi/ginkgo v1.12.0/go.mod h1:oUhWkIvk5aDxtKvDDuw8gItl8pKl42LzjC9KZE0HfGg=
 github.com/onsi/ginkgo v1.12.1/go.mod h1:zj2OWP4+oCPe1qIXoGWkgMRwljMUYCdkwsT2108oapk=
-github.com/onsi/ginkgo/v2 v2.9.2 h1:BA2GMJOtfGAfagzYtrAlufIP0lq6QERkFmHLMLPwFSU=
-github.com/onsi/ginkgo/v2 v2.9.2/go.mod h1:WHcJJG2dIlcCqVfBAwUCrJxSPFb6v4azBwgxeMeDuts=
+github.com/onsi/ginkgo/v2 v2.11.0 h1:WgqUCUt/lT6yXoQ8Wef0fsNn5cAuMK7+KT9UFRz2tcU=
+github.com/onsi/ginkgo/v2 v2.11.0/go.mod h1:ZhrRA5XmEE3x3rhlzamx/JJvujdZoJ2uvgI7kR0iZvM=
 github.com/onsi/gomega v0.0.0-20151007035656-2152b45fa28a/go.mod h1:C1qb7wdrVGGVU+Z6iS04AVkA3Q65CEZX59MT0QO5uiA=
 github.com/onsi/gomega v0.0.0-20170829124025-dcabb60a477c/go.mod h1:C1qb7wdrVGGVU+Z6iS04AVkA3Q65CEZX59MT0QO5uiA=
 github.com/onsi/gomega v1.5.0/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY=
@@ -720,8 +720,8 @@ github.com/onsi/gomega v1.7.0/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1Cpa
 github.com/onsi/gomega v1.7.1/go.mod h1:XdKZgCCFLUoM/7CFJVPcG8C1xQ1AJ0vpAezJrB7JYyY=
 github.com/onsi/gomega v1.9.0/go.mod h1:Ho0h+IUsWyvy1OpqCwxlQ/21gkhVunqlU8fDGcoTdcA=
 github.com/onsi/gomega v1.10.3/go.mod h1:V9xEwhxec5O8UDM77eCW8vLymOMltsqPVYWrpDsH8xc=
-github.com/onsi/gomega v1.27.6 h1:ENqfyGeS5AX/rlXDd/ETokDz93u0YufY1Pgxuy/PvWE=
-github.com/onsi/gomega v1.27.6/go.mod h1:PIQNjfQwkP3aQAH7lf7j87O/5FiNr+ZR8+ipb+qQlhg=
+github.com/onsi/gomega v1.27.8 h1:gegWiwZjBsf2DgiSbf5hpokZ98JVDMcWkUiigk6/KXc=
+github.com/onsi/gomega v1.27.8/go.mod h1:2J8vzI/s+2shY9XHRApDkdgPo1TKT7P2u6fXeJKFnNQ=
 github.com/opencontainers/go-digest v0.0.0-20170106003457-a6d0ee40d420/go.mod h1:cMLVZDEM3+U2I4VmLI6N8jQYUd2OVphdqWwCJHrFt2s=
 github.com/opencontainers/go-digest v0.0.0-20180430190053-c9281466c8b2/go.mod h1:cMLVZDEM3+U2I4VmLI6N8jQYUd2OVphdqWwCJHrFt2s=
 github.com/opencontainers/go-digest v1.0.0-rc1/go.mod h1:cMLVZDEM3+U2I4VmLI6N8jQYUd2OVphdqWwCJHrFt2s=
@@ -731,8 +731,8 @@ github.com/opencontainers/go-digest v1.0.0/go.mod h1:0JzlMkj0TRzQZfJkVvzbP0HBR3I
 github.com/opencontainers/image-spec v1.0.0/go.mod h1:BtxoFyWECRxE4U/7sNtV5W15zMzWCbyJoFRP3s7yZA0=
 github.com/opencontainers/image-spec v1.0.1/go.mod h1:BtxoFyWECRxE4U/7sNtV5W15zMzWCbyJoFRP3s7yZA0=
 github.com/opencontainers/image-spec v1.0.2/go.mod h1:BtxoFyWECRxE4U/7sNtV5W15zMzWCbyJoFRP3s7yZA0=
-github.com/opencontainers/image-spec v1.1.0-rc2.0.20221005185240-3a7f492d3f1b h1:YWuSjZCQAPM8UUBLkYUk1e+rZcvWHJmFb6i6rM44Xs8=
-github.com/opencontainers/image-spec v1.1.0-rc2.0.20221005185240-3a7f492d3f1b/go.mod h1:3OVijpioIKYWTqjiG0zfF6wvoJ4fAXGbjdZuI2NgsRQ=
+github.com/opencontainers/image-spec v1.1.0-rc3 h1:fzg1mXZFj8YdPeNkRXMg+zb88BFV0Ys52cJydRwBkb8=
+github.com/opencontainers/image-spec v1.1.0-rc3/go.mod h1:X4pATf0uXsnn3g5aiGIsVnJBR4mxhKzfwmvK/B2NTm8=
 github.com/opencontainers/runc v1.1.1-0.20220617142545-8b9452f75cbc h1:qjkUzmFsOFbQyjObybk40mRida83j5IHRaKzLGdBbEU=
 github.com/opencontainers/runc v1.1.1-0.20220617142545-8b9452f75cbc/go.mod h1:wUOQGsiKae6VzA/UvlCK3cO+pHk8F2VQHlIoITEfMM8=
 github.com/opencontainers/runtime-spec v0.1.2-0.20190507144316-5b71a03e2700/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0=
@@ -741,8 +741,8 @@ github.com/opencontainers/runtime-spec v1.0.2-0.20190207185410-29686dbc5559/go.m
 github.com/opencontainers/runtime-spec v1.0.2/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0=
 github.com/opencontainers/runtime-spec v1.0.3-0.20200929063507-e6143ca7d51d/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0=
 github.com/opencontainers/runtime-spec v1.0.3-0.20210326190908-1c3f411f0417/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0=
-github.com/opencontainers/runtime-spec v1.1.0-rc.2 h1:ucBtEms2tamYYW/SvGpvq9yUN0NEVL6oyLEwDcTSrk8=
-github.com/opencontainers/runtime-spec v1.1.0-rc.2/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0=
+github.com/opencontainers/runtime-spec v1.1.0-rc.3 h1:l04uafi6kxByhbxev7OWiuUv0LZxEsYUfDWZ6bztAuU=
+github.com/opencontainers/runtime-spec v1.1.0-rc.3/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0=
 github.com/opencontainers/runtime-tools v0.0.0-20181011054405-1d69bd0f9c39/go.mod h1:r3f7wjNzSs2extwzU3Y+6pKfobzPh+kKFJ3ofN+3nfs=
 github.com/opencontainers/runtime-tools v0.9.1-0.20230317050512-e931285f4b69 h1:NL4xDvl68WWqQ+8WPMM3l5PsZTxaT7Z4K3VSKDRuAGs=
 github.com/opencontainers/runtime-tools v0.9.1-0.20230317050512-e931285f4b69/go.mod h1:bNpfuSHA3DZRtD0TPWO8LzgtLpFPTVA/3jDkzD/OPyk=
@@ -828,12 +828,12 @@ github.com/sebdah/goldie/v2 v2.5.3 h1:9ES/mNN+HNUbNWpVAlrzuZ7jE+Nrczbj8uFRjM7624
 github.com/seccomp/libseccomp-golang v0.10.0/go.mod h1:JA8cRccbGaA1s33RQf7Y1+q9gHmZX1yB/z9WDN1C6fg=
 github.com/sergi/go-diff v1.2.0 h1:XU+rvMAioB0UC3q1MFrIQy4Vo5/4VsRDQQXHsEya6xQ=
 github.com/shurcooL/sanitized_anchor_name v1.0.0/go.mod h1:1NzhyTcUVG4SuEtjjoZeVRXNmyL/1OwPU0+IJeTBvfc=
-github.com/sigstore/fulcio v1.2.0 h1:I4H764cDbryKXkPtasUvo8bcix/7xLvkxWYWNp+JtWI=
-github.com/sigstore/fulcio v1.2.0/go.mod h1:FS7qpBvOEqs0uEh1+hJxzxtJistWN29ybLtAzFNUi0c=
-github.com/sigstore/rekor v1.2.0 h1:ahlnoEY3zo8Vc+eZLPobamw6YfBTAbI0lthzUQd6qe4=
-github.com/sigstore/rekor v1.2.0/go.mod h1:zcFO54qIg2G1/i0sE/nvmELUOng/n0MPjTszRYByVPo=
-github.com/sigstore/sigstore v1.6.4 h1:jH4AzR7qlEH/EWzm+opSpxCfuUcjHL+LJPuQE7h40WE=
-github.com/sigstore/sigstore v1.6.4/go.mod h1:pjR64lBxnjoSrAr+Ydye/FV73IfrgtoYlAI11a8xMfA=
+github.com/sigstore/fulcio v1.3.1 h1:0ntW9VbQbt2JytoSs8BOGB84A65eeyvGSavWteYp29Y=
+github.com/sigstore/fulcio v1.3.1/go.mod h1:/XfqazOec45ulJZpyL9sq+OsVQ8g2UOVoNVi7abFgqU=
+github.com/sigstore/rekor v1.2.2-0.20230601122533-4c81ff246d12 h1:x/WnxasgR40qGY67IHwioakXLuhDxJ10vF8/INuOTiI=
+github.com/sigstore/rekor v1.2.2-0.20230601122533-4c81ff246d12/go.mod h1:8c+a8Yo7r8gKuYbIaz+c3oOdw9iMXx+tMdOg2+b+2jQ=
+github.com/sigstore/sigstore v1.7.1 h1:fCATemikcBK0cG4+NcM940MfoIgmioY1vC6E66hXxks=
+github.com/sigstore/sigstore v1.7.1/go.mod h1:0PmMzfJP2Y9+lugD0wer4e7TihR5tM7NcIs3bQNk5xg=
 github.com/sirupsen/logrus v1.0.4-0.20170822132746-89742aefa4b2/go.mod h1:pMByvHTf9Beacp5x1UXfOR9xyW/9antXMhjMPG0dEzc=
 github.com/sirupsen/logrus v1.0.6/go.mod h1:pMByvHTf9Beacp5x1UXfOR9xyW/9antXMhjMPG0dEzc=
 github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo=
@@ -884,9 +884,9 @@ github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/
 github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
 github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU=
 github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4=
-github.com/stretchr/testify v1.8.2 h1:+h33VjcLVPDHtOdpUCuF+7gSuG3yGIftsP1YvFihtJ8=
-github.com/sylabs/sif/v2 v2.11.1 h1:d09yPukVa8b74wuy+QTA4Is3w8MH0UjO/xlWQUuFzpY=
-github.com/sylabs/sif/v2 v2.11.1/go.mod h1:i4GcKLOaT4ertznbsuf11d/G9zLEfUZa7YhrFc5L6YQ=
+github.com/stretchr/testify v1.8.4 h1:CcVxjf3Q8PM0mHUKJCdn+eZZtm5yQwehR5yeSVQQcUk=
+github.com/sylabs/sif/v2 v2.11.5 h1:7ssPH3epSonsTrzbS1YxeJ9KuqAN7ISlSM61a7j/mQM=
+github.com/sylabs/sif/v2 v2.11.5/go.mod h1:GBoZs9LU3e4yJH1dcZ3Akf/jsqYgy5SeguJQC+zd75Y=
 github.com/syndtr/gocapability v0.0.0-20170704070218-db04d3cc01c8/go.mod h1:hkRG7XYTFWNJGYcbNJQlaLq0fg1yr4J4t/NcTQtrfww=
 github.com/syndtr/gocapability v0.0.0-20180916011248-d98352740cb2/go.mod h1:hkRG7XYTFWNJGYcbNJQlaLq0fg1yr4J4t/NcTQtrfww=
 github.com/syndtr/gocapability v0.0.0-20200815063812-42c35b437635 h1:kdXcSzyDtseVEc4yCz2qF8ZrQvIDBJLl4S1c3GCXmoI=
@@ -917,8 +917,8 @@ github.com/urfave/cli v1.22.12/go.mod h1:sSBEIC79qR6OvcmsD4U3KABeOTxDqQtdDnaFuUN
 github.com/vbatts/tar-split v0.11.2/go.mod h1:vV3ZuO2yWSVsz+pfFzDG/upWH1JhjOiEaWq6kXyQ3VI=
 github.com/vbatts/tar-split v0.11.3 h1:hLFqsOLQ1SsppQNTMpkpPXClLDfC2A3Zgy9OUU+RVck=
 github.com/vbatts/tar-split v0.11.3/go.mod h1:9QlHN18E+fEH7RdG+QAJJcuya3rqT7eXSTY7wGrAokY=
-github.com/vbauerster/mpb/v8 v8.3.0 h1:xw2eMJ6v5NP8Rd7yOVzU6OqnRPrS1yWAoLTrWe7W4Nc=
-github.com/vbauerster/mpb/v8 v8.3.0/go.mod h1:bngtYUAu25QGxcYYglsF6oyoHlC9Yhh582xF9LjfmL4=
+github.com/vbauerster/mpb/v8 v8.4.0 h1:Jq2iNA7T6SydpMVOwaT+2OBWlXS9Th8KEvBqeu5eeTo=
+github.com/vbauerster/mpb/v8 v8.4.0/go.mod h1:vjp3hSTuCtR+x98/+2vW3eZ8XzxvGoP8CPseHMhiPyc=
 github.com/vishvananda/netlink v0.0.0-20181108222139-023a6dafdcdf/go.mod h1:+SR5DhBJrl6ZM7CoCKvpw5BKroDKQ+PJqOg65H/2ktk=
 github.com/vishvananda/netlink v1.1.0/go.mod h1:cTgwzPIzzgDAYoQrMm0EdrjRUBkTqKYppBueQtXaqoE=
 github.com/vishvananda/netlink v1.1.1-0.20201029203352-d40f9887b852/go.mod h1:twkDnbuQxJYemMlGd4JFIcuhgX83tXhKS2B/PRMpOho=
@@ -1006,8 +1006,8 @@ golang.org/x/exp v0.0.0-20191227195350-da58074b4299/go.mod h1:2RIsYlXP63K8oxa1u0
 golang.org/x/exp v0.0.0-20200119233911-0405dc783f0a/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4=
 golang.org/x/exp v0.0.0-20200207192155-f17229e696bd/go.mod h1:J/WKrq2StrnmMY6+EHIKF9dgMWnmCNThgcyBT1FY9mM=
 golang.org/x/exp v0.0.0-20200224162631-6cc2880d07d6/go.mod h1:3jZMyOhIsHpP37uCMkUooju7aAi5cS1Q23tOzKc+0MU=
-golang.org/x/exp v0.0.0-20230321023759-10a507213a29 h1:ooxPy7fPvB4kwsA2h+iBNHkAbp/4JxTSwCmvdjEYmug=
-golang.org/x/exp v0.0.0-20230321023759-10a507213a29/go.mod h1:CxIveKay+FTh1D0yPZemJVgC/95VzuuOLq5Qi4xnoYc=
+golang.org/x/exp v0.0.0-20230522175609-2e198f4a06a1 h1:k/i9J1pBpvlfR+9QsetwPyERsqu1GIbi967PQMq3Ivc=
+golang.org/x/exp v0.0.0-20230522175609-2e198f4a06a1/go.mod h1:V1LtkGg67GoY2N1AnLN78QLrzxkLyJw7RJb1gzOOz9w=
 golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js=
 golang.org/x/image v0.0.0-20190802002840-cff245a6509b/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0=
 golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
@@ -1074,8 +1074,8 @@ golang.org/x/net v0.0.0-20210421230115-4e50805a0758/go.mod h1:72T/g9IO56b78aLF+1
 golang.org/x/net v0.0.0-20210825183410-e898025ed96a/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
 golang.org/x/net v0.0.0-20211112202133-69e39bad7dc2/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
 golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c=
-golang.org/x/net v0.10.0 h1:X2//UzNDwYmtCLn7To6G58Wr6f5ahEAQgKNzv9Y951M=
-golang.org/x/net v0.10.0/go.mod h1:0qNGK6F8kojg2nk9dLZ2mShWaEBan6FAoqfSigmmuDg=
+golang.org/x/net v0.11.0 h1:Gi2tvZIJyBtO9SDr1q9h5hEQCp/4L2RQ+ar0qjx2oNU=
+golang.org/x/net v0.11.0/go.mod h1:2L/ixqYpgIVXmeoSA/4Lu7BzTG4KIyPIryS4IsOd1oQ=
 golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
 golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
 golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
@@ -1094,8 +1094,8 @@ golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJ
 golang.org/x/sync v0.0.0-20201207232520-09787c993a3a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
 golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
 golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
-golang.org/x/sync v0.2.0 h1:PUR+T4wwASmuSTYdKjYHI5TD22Wy5ogLU5qZCOLxBrI=
-golang.org/x/sync v0.2.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sync v0.3.0 h1:ftCYgMx6zT/asHUrPw8BLLscYtGznsLAnjq5RH9P66E=
+golang.org/x/sync v0.3.0/go.mod h1:FU7BRWz2tNW+3quACPkgCx/L+uEAv1htQ0V83Z9Rj+Y=
 golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
 golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
 golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
@@ -1261,8 +1261,8 @@ golang.org/x/tools v0.0.0-20200916195026-c9a70fc28ce3/go.mod h1:z6u4i615ZeAfBE4X
 golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
 golang.org/x/tools v0.1.1/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk=
 golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc=
-golang.org/x/tools v0.7.0 h1:W4OVu8VVOaIO0yzWMNdepAulS7YfoS3Zabrm8DOXXU4=
-golang.org/x/tools v0.7.0/go.mod h1:4pg6aUX35JBAogB10C9AtvVL+qowtN4pT3CGSQex14s=
+golang.org/x/tools v0.9.3 h1:Gn1I8+64MsuTb/HpH+LmQtNas23LhUVr3rYZ0eKuaMM=
+golang.org/x/tools v0.9.3/go.mod h1:owI94Op576fPu3cIGQeHs3joujW/2Oc6MtlxbF5dfNc=
 golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
 golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
 golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
diff --git a/vendor/github.com/imdario/mergo/.deepsource.toml b/vendor/dario.cat/mergo/.deepsource.toml
similarity index 72%
rename from vendor/github.com/imdario/mergo/.deepsource.toml
rename to vendor/dario.cat/mergo/.deepsource.toml
index 8a0681af8..a8bc979e0 100644
--- a/vendor/github.com/imdario/mergo/.deepsource.toml
+++ b/vendor/dario.cat/mergo/.deepsource.toml
@@ -9,4 +9,4 @@ name = "go"
 enabled = true

 [analyzers.meta]
-  import_path = "github.com/imdario/mergo"
\ No newline at end of file
+  import_path = "dario.cat/mergo"
\ No newline at end of file
diff --git a/vendor/github.com/imdario/mergo/.gitignore b/vendor/dario.cat/mergo/.gitignore
similarity index 100%
rename from vendor/github.com/imdario/mergo/.gitignore
rename to vendor/dario.cat/mergo/.gitignore
diff --git a/vendor/github.com/imdario/mergo/.travis.yml b/vendor/dario.cat/mergo/.travis.yml
similarity index 100%
rename from vendor/github.com/imdario/mergo/.travis.yml
rename to vendor/dario.cat/mergo/.travis.yml
diff --git a/vendor/github.com/imdario/mergo/CODE_OF_CONDUCT.md b/vendor/dario.cat/mergo/CODE_OF_CONDUCT.md
similarity index 100%
rename from vendor/github.com/imdario/mergo/CODE_OF_CONDUCT.md
rename to vendor/dario.cat/mergo/CODE_OF_CONDUCT.md
diff --git a/vendor/github.com/imdario/mergo/CONTRIBUTING.md b/vendor/dario.cat/mergo/CONTRIBUTING.md
similarity index 100%
rename from vendor/github.com/imdario/mergo/CONTRIBUTING.md
rename to vendor/dario.cat/mergo/CONTRIBUTING.md
diff --git a/vendor/github.com/imdario/mergo/LICENSE b/vendor/dario.cat/mergo/LICENSE
similarity index 100%
rename from vendor/github.com/imdario/mergo/LICENSE
rename to vendor/dario.cat/mergo/LICENSE
diff --git a/vendor/github.com/imdario/mergo/README.md b/vendor/dario.cat/mergo/README.md
similarity index 89%
rename from vendor/github.com/imdario/mergo/README.md
rename to vendor/dario.cat/mergo/README.md
index 4f0287498..7d0cf9f32 100644
--- a/vendor/github.com/imdario/mergo/README.md
+++ b/vendor/dario.cat/mergo/README.md
@@ -1,17 +1,20 @@
 # Mergo

-[![GoDoc][3]][4]
 [![GitHub release][5]][6]
 [![GoCard][7]][8]
-[![Build Status][1]][2]
-[![Coverage Status][9]][10]
+[![Test status][1]][2]
+[![OpenSSF Scorecard][21]][22]
+[![OpenSSF Best Practices][19]][20]
+[![Coverage status][9]][10]
 [![Sourcegraph][11]][12]
-[![FOSSA Status][13]][14]
+[![FOSSA status][13]][14]
+
+[![GoDoc][3]][4]
 [![Become my sponsor][15]][16]
 [![Tidelift][17]][18]

-[1]: https://travis-ci.org/imdario/mergo.png
-[2]: https://travis-ci.org/imdario/mergo
+[1]: https://github.com/imdario/mergo/workflows/tests/badge.svg?branch=master
+[2]: https://github.com/imdario/mergo/actions/workflows/tests.yml
 [3]: https://godoc.org/github.com/imdario/mergo?status.svg
 [4]: https://godoc.org/github.com/imdario/mergo
 [5]: https://img.shields.io/github/release/imdario/mergo.svg
@@ -28,6 +31,10 @@
 [16]: https://github.com/sponsors/imdario
https://tidelift.com/badges/package/go/github.com%2Fimdario%2Fmergo [18]: https://tidelift.com/subscription/pkg/go-github.com-imdario-mergo +[19]: https://bestpractices.coreinfrastructure.org/projects/7177/badge +[20]: https://bestpractices.coreinfrastructure.org/projects/7177 +[21]: https://api.securityscorecards.dev/projects/github.com/imdario/mergo/badge +[22]: https://api.securityscorecards.dev/projects/github.com/imdario/mergo A helper to merge structs and maps in Golang. Useful for configuration default values, avoiding messy if-statements. @@ -39,13 +46,19 @@ Also a lovely [comune](http://en.wikipedia.org/wiki/Mergo) (municipality) in the It is ready for production use. [It is used in several projects by Docker, Google, The Linux Foundation, VMWare, Shopify, Microsoft, etc](https://github.com/imdario/mergo#mergo-in-the-wild). -### Important note +### Important notes + +#### 1.0.0 + +In [1.0.0](//github.com/imdario/mergo/releases/tag/1.0.0) Mergo moves to a vanity URL `dario.cat/mergo`. + +#### 0.3.9 Please keep in mind that a problematic PR broke [0.3.9](//github.com/imdario/mergo/releases/tag/0.3.9). I reverted it in [0.3.10](//github.com/imdario/mergo/releases/tag/0.3.10), and I consider it stable but not bug-free. Also, this version adds support for go modules. Keep in mind that in [0.3.2](//github.com/imdario/mergo/releases/tag/0.3.2), Mergo changed `Merge()`and `Map()` signatures to support [transformers](#transformers). I added an optional/variadic argument so that it won't break the existing code. -If you were using Mergo before April 6th, 2015, please check your project works as intended after updating your local copy with ```go get -u github.com/imdario/mergo```. I apologize for any issue caused by its previous behavior and any future bug that Mergo could cause in existing projects after the change (release 0.2.0). +If you were using Mergo before April 6th, 2015, please check your project works as intended after updating your local copy with ```go get -u dario.cat/mergo```. I apologize for any issue caused by its previous behavior and any future bug that Mergo could cause in existing projects after the change (release 0.2.0). ### Donations @@ -103,11 +116,11 @@ If Mergo is useful to you, consider buying me a coffee, a beer, or making a mont ## Install - go get github.com/imdario/mergo + go get dario.cat/mergo // use in your .go code import ( - "github.com/imdario/mergo" + "dario.cat/mergo" ) ## Usage @@ -145,7 +158,7 @@ package main import ( "fmt" - "github.com/imdario/mergo" + "dario.cat/mergo" ) type Foo struct { @@ -181,9 +194,9 @@ package main import ( "fmt" - "github.com/imdario/mergo" - "reflect" - "time" + "dario.cat/mergo" + "reflect" + "time" ) type timeTransformer struct { @@ -232,5 +245,4 @@ Written by [Dario Castañé](http://dario.im). [BSD 3-Clause](http://opensource.org/licenses/BSD-3-Clause) license, as [Go language](http://golang.org/LICENSE). 
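For context, a minimal usage sketch against the new `dario.cat/mergo` import path (an editor's illustration, not part of the vendored files; `WithOverride` is mergo's option for letting non-empty source fields overwrite destination fields):

    package main

    import (
        "fmt"

        "dario.cat/mergo"
    )

    type Config struct {
        Host string
        Port int
    }

    func main() {
        dst := Config{Host: "localhost", Port: 8080}
        src := Config{Host: "example.com"} // Port is zero and therefore ignored
        // Without options, Merge only fills zero-value fields in dst;
        // WithOverride lets non-empty src fields replace existing dst values.
        if err := mergo.Merge(&dst, src, mergo.WithOverride); err != nil {
            fmt.Println(err)
            return
        }
        fmt.Println(dst) // {example.com 8080}
    }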
- [![FOSSA Status](https://app.fossa.io/api/projects/git%2Bgithub.com%2Fimdario%2Fmergo.svg?type=large)](https://app.fossa.io/projects/git%2Bgithub.com%2Fimdario%2Fmergo?ref=badge_large) diff --git a/vendor/github.com/imdario/mergo/SECURITY.md b/vendor/dario.cat/mergo/SECURITY.md similarity index 100% rename from vendor/github.com/imdario/mergo/SECURITY.md rename to vendor/dario.cat/mergo/SECURITY.md diff --git a/vendor/github.com/imdario/mergo/doc.go b/vendor/dario.cat/mergo/doc.go similarity index 88% rename from vendor/github.com/imdario/mergo/doc.go rename to vendor/dario.cat/mergo/doc.go index fcd985f99..7d96ec054 100644 --- a/vendor/github.com/imdario/mergo/doc.go +++ b/vendor/dario.cat/mergo/doc.go @@ -8,30 +8,36 @@ A helper to merge structs and maps in Golang. Useful for configuration default v Mergo merges same-type structs and maps by setting default values in zero-value fields. Mergo won't merge unexported (private) fields. It will recursively merge any exported one. It also won't merge structs inside maps (because they are not addressable using Go reflection). -Status +# Status It is ready for production use. It is used in several projects by Docker, Google, The Linux Foundation, VMWare, Shopify, etc. -Important note +# Important notes + +1.0.0 + +In 1.0.0 Mergo moves to a vanity URL `dario.cat/mergo`. + +0.3.9 Please keep in mind that a problematic PR broke 0.3.9. We reverted it in 0.3.10. We consider 0.3.10 as stable but not bug-free. Also, this version adds support for go modules. Keep in mind that in 0.3.2, Mergo changed Merge() and Map() signatures to support transformers. We added an optional/variadic argument so that it won't break the existing code. -If you were using Mergo before April 6th, 2015, please check your project works as intended after updating your local copy with go get -u github.com/imdario/mergo. I apologize for any issue caused by its previous behavior and any future bug that Mergo could cause in existing projects after the change (release 0.2.0). +If you were using Mergo before April 6th, 2015, please check your project works as intended after updating your local copy with go get -u dario.cat/mergo. I apologize for any issue caused by its previous behavior and any future bug that Mergo could cause in existing projects after the change (release 0.2.0). -Install +# Install Do your usual installation procedure: - go get github.com/imdario/mergo + go get dario.cat/mergo - // use in your .go code - import ( - "github.com/imdario/mergo" - ) + // use in your .go code + import ( + "dario.cat/mergo" + ) -Usage +# Usage You can only merge same-type structs with exported fields initialized as zero value of their type and same-type maps. Mergo won't merge unexported (private) fields but will recursively merge any exported one. It won't merge empty struct values as they are zero values too. Also, maps will be merged recursively except for structs inside maps (because they are not addressable using Go reflection). @@ -59,7 +65,7 @@ Here is a nice example: import ( "fmt" - "github.com/imdario/mergo" + "dario.cat/mergo" ) type Foo struct { @@ -81,7 +87,7 @@ Here is a nice example: // {two 2} } -Transformers +# Transformers Transformers allow to merge specific types differently than in the default behavior. In other words, now you can customize how some types are merged. For example, time.Time is a struct; it doesn't have zero value but IsZero can return true because it has fields with zero value. How can we merge a non-zero time.Time? 
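Before the transformer example below, a quick sketch of the `Map()` side of the API mentioned above (again an editor's illustration under the new import path; it assumes mergo's convention of matching map keys to exported struct fields by up-casing the first letter, so keys are written lower-cased):

    package main

    import (
        "fmt"

        "dario.cat/mergo"
    )

    type Config struct {
        Name string
        Port int
    }

    func main() {
        var conf Config
        defaults := map[string]interface{}{
            "name": "agent",
            "port": 8080,
        }
        // Map fills zero-value exported fields of conf from the map.
        if err := mergo.Map(&conf, defaults); err != nil {
            fmt.Println(err)
            return
        }
        fmt.Println(conf) // {agent 8080}
    }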
@@ -89,9 +95,9 @@ Transformers allow to merge specific types differently than in the default behav import ( "fmt" - "github.com/imdario/mergo" - "reflect" - "time" + "dario.cat/mergo" + "reflect" + "time" ) type timeTransformer struct { @@ -127,17 +133,16 @@ Transformers allow to merge specific types differently than in the default behav // { 2018-01-12 01:15:00 +0000 UTC m=+0.000000001 } } -Contact me +# Contact me If I can help you, you have an idea or you are using Mergo in your projects, don't hesitate to drop me a line (or a pull request): https://twitter.com/im_dario -About +# About Written by Dario Castañé: https://da.rio.hn -License +# License BSD 3-Clause license, as Go language. - */ package mergo diff --git a/vendor/github.com/imdario/mergo/map.go b/vendor/dario.cat/mergo/map.go similarity index 100% rename from vendor/github.com/imdario/mergo/map.go rename to vendor/dario.cat/mergo/map.go diff --git a/vendor/github.com/imdario/mergo/merge.go b/vendor/dario.cat/mergo/merge.go similarity index 100% rename from vendor/github.com/imdario/mergo/merge.go rename to vendor/dario.cat/mergo/merge.go diff --git a/vendor/github.com/imdario/mergo/mergo.go b/vendor/dario.cat/mergo/mergo.go similarity index 100% rename from vendor/github.com/imdario/mergo/mergo.go rename to vendor/dario.cat/mergo/mergo.go diff --git a/vendor/github.com/Azure/go-ansiterm/SECURITY.md b/vendor/github.com/Azure/go-ansiterm/SECURITY.md new file mode 100644 index 000000000..e138ec5d6 --- /dev/null +++ b/vendor/github.com/Azure/go-ansiterm/SECURITY.md @@ -0,0 +1,41 @@ + + +## Security + +Microsoft takes the security of our software products and services seriously, which includes all source code repositories managed through our GitHub organizations, which include [Microsoft](https://github.com/microsoft), [Azure](https://github.com/Azure), [DotNet](https://github.com/dotnet), [AspNet](https://github.com/aspnet), [Xamarin](https://github.com/xamarin), and [our GitHub organizations](https://opensource.microsoft.com/). + +If you believe you have found a security vulnerability in any Microsoft-owned repository that meets [Microsoft's definition of a security vulnerability](https://aka.ms/opensource/security/definition), please report it to us as described below. + +## Reporting Security Issues + +**Please do not report security vulnerabilities through public GitHub issues.** + +Instead, please report them to the Microsoft Security Response Center (MSRC) at [https://msrc.microsoft.com/create-report](https://aka.ms/opensource/security/create-report). + +If you prefer to submit without logging in, send email to [secure@microsoft.com](mailto:secure@microsoft.com). If possible, encrypt your message with our PGP key; please download it from the [Microsoft Security Response Center PGP Key page](https://aka.ms/opensource/security/pgpkey). + +You should receive a response within 24 hours. If for some reason you do not, please follow up via email to ensure we received your original message. Additional information can be found at [microsoft.com/msrc](https://aka.ms/opensource/security/msrc). + +Please include the requested information listed below (as much as you can provide) to help us better understand the nature and scope of the possible issue: + + * Type of issue (e.g. buffer overflow, SQL injection, cross-site scripting, etc.) 
+ * Full paths of source file(s) related to the manifestation of the issue + * The location of the affected source code (tag/branch/commit or direct URL) + * Any special configuration required to reproduce the issue + * Step-by-step instructions to reproduce the issue + * Proof-of-concept or exploit code (if possible) + * Impact of the issue, including how an attacker might exploit the issue + +This information will help us triage your report more quickly. + +If you are reporting for a bug bounty, more complete reports can contribute to a higher bounty award. Please visit our [Microsoft Bug Bounty Program](https://aka.ms/opensource/security/bounty) page for more details about our active programs. + +## Preferred Languages + +We prefer all communications to be in English. + +## Policy + +Microsoft follows the principle of [Coordinated Vulnerability Disclosure](https://aka.ms/opensource/security/cvd). + + diff --git a/vendor/github.com/Microsoft/go-winio/.golangci.yml b/vendor/github.com/Microsoft/go-winio/.golangci.yml index af403bb13..7b503d26a 100644 --- a/vendor/github.com/Microsoft/go-winio/.golangci.yml +++ b/vendor/github.com/Microsoft/go-winio/.golangci.yml @@ -8,12 +8,8 @@ linters: - containedctx # struct contains a context - dupl # duplicate code - errname # errors are named correctly - - goconst # strings that should be constants - - godot # comments end in a period - - misspell - nolintlint # "//nolint" directives are properly explained - revive # golint replacement - - stylecheck # golint replacement, less configurable than revive - unconvert # unnecessary conversions - wastedassign @@ -23,10 +19,7 @@ linters: - exhaustive # check exhaustiveness of enum switch statements - gofmt # files are gofmt'ed - gosec # security - - nestif # deeply nested ifs - nilerr # returns nil even with non-nil error - - prealloc # slices that can be pre-allocated - - structcheck # unused struct fields - unparam # unused function params issues: @@ -42,6 +35,18 @@ issues: text: "^line-length-limit: " source: "^//(go:generate|sys) " + #TODO: remove after upgrading to go1.18 + # ignore comment spacing for nolint and sys directives + - linters: + - revive + text: "^comment-spacings: no space between comment delimiter and comment text" + source: "//(cspell:|nolint:|sys |todo)" + + # not on go 1.18 yet, so no any + - linters: + - revive + text: "^use-any: since GO 1.18 'interface{}' can be replaced by 'any'" + # allow unjustified ignores of error checks in defer statements - linters: - nolintlint @@ -56,6 +61,8 @@ issues: linters-settings: + exhaustive: + default-signifies-exhaustive: true govet: enable-all: true disable: @@ -98,6 +105,8 @@ linters-settings: disabled: true - name: flag-parameter # excessive, and a common idiom we use disabled: true + - name: unhandled-error # warns over common fmt.Print* and io.Close; rely on errcheck instead + disabled: true # general config - name: line-length-limit arguments: @@ -138,7 +147,3 @@ linters-settings: - VPCI - WCOW - WIM - stylecheck: - checks: - - "all" - - "-ST1003" # use revive's var naming diff --git a/vendor/github.com/Microsoft/go-winio/hvsock.go b/vendor/github.com/Microsoft/go-winio/hvsock.go index 52f1c280f..c88191658 100644 --- a/vendor/github.com/Microsoft/go-winio/hvsock.go +++ b/vendor/github.com/Microsoft/go-winio/hvsock.go @@ -23,7 +23,7 @@ import ( const afHVSock = 34 // AF_HYPERV // Well known Service and VM IDs -//https://docs.microsoft.com/en-us/virtualization/hyper-v-on-windows/user-guide/make-integration-service#vmid-wildcards +// 
https://docs.microsoft.com/en-us/virtualization/hyper-v-on-windows/user-guide/make-integration-service#vmid-wildcards // HvsockGUIDWildcard is the wildcard VmId for accepting connections from all partitions. func HvsockGUIDWildcard() guid.GUID { // 00000000-0000-0000-0000-000000000000 @@ -31,7 +31,7 @@ func HvsockGUIDWildcard() guid.GUID { // 00000000-0000-0000-0000-000000000000 } // HvsockGUIDBroadcast is the wildcard VmId for broadcasting sends to all partitions. -func HvsockGUIDBroadcast() guid.GUID { //ffffffff-ffff-ffff-ffff-ffffffffffff +func HvsockGUIDBroadcast() guid.GUID { // ffffffff-ffff-ffff-ffff-ffffffffffff return guid.GUID{ Data1: 0xffffffff, Data2: 0xffff, @@ -246,7 +246,7 @@ func (l *HvsockListener) Accept() (_ net.Conn, err error) { var addrbuf [addrlen * 2]byte var bytes uint32 - err = syscall.AcceptEx(l.sock.handle, sock.handle, &addrbuf[0], 0 /*rxdatalen*/, addrlen, addrlen, &bytes, &c.o) + err = syscall.AcceptEx(l.sock.handle, sock.handle, &addrbuf[0], 0 /* rxdatalen */, addrlen, addrlen, &bytes, &c.o) if _, err = l.sock.asyncIO(c, nil, bytes, err); err != nil { return nil, l.opErr("accept", os.NewSyscallError("acceptex", err)) } diff --git a/vendor/github.com/Microsoft/go-winio/internal/fs/doc.go b/vendor/github.com/Microsoft/go-winio/internal/fs/doc.go new file mode 100644 index 000000000..1f6538817 --- /dev/null +++ b/vendor/github.com/Microsoft/go-winio/internal/fs/doc.go @@ -0,0 +1,2 @@ +// This package contains Win32 filesystem functionality. +package fs diff --git a/vendor/github.com/Microsoft/go-winio/internal/fs/fs.go b/vendor/github.com/Microsoft/go-winio/internal/fs/fs.go new file mode 100644 index 000000000..509b3ec64 --- /dev/null +++ b/vendor/github.com/Microsoft/go-winio/internal/fs/fs.go @@ -0,0 +1,202 @@ +//go:build windows + +package fs + +import ( + "golang.org/x/sys/windows" + + "github.com/Microsoft/go-winio/internal/stringbuffer" +) + +//go:generate go run github.com/Microsoft/go-winio/tools/mkwinsyscall -output zsyscall_windows.go fs.go + +// https://learn.microsoft.com/en-us/windows/win32/api/fileapi/nf-fileapi-createfilew +//sys CreateFile(name string, access AccessMask, mode FileShareMode, sa *syscall.SecurityAttributes, createmode FileCreationDisposition, attrs FileFlagOrAttribute, templatefile windows.Handle) (handle windows.Handle, err error) [failretval==windows.InvalidHandle] = CreateFileW + +const NullHandle windows.Handle = 0 + +// AccessMask defines standard, specific, and generic rights. +// +// Bitmask: +// 3 3 2 2 2 2 2 2 2 2 2 2 1 1 1 1 1 1 1 1 1 1 +// 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0 +// +---------------+---------------+-------------------------------+ +// |G|G|G|G|Resvd|A| StandardRights| SpecificRights | +// |R|W|E|A| |S| | | +// +-+-------------+---------------+-------------------------------+ +// +// GR Generic Read +// GW Generic Write +// GE Generic Execute +// GA Generic All +// Resvd Reserved +// AS Access Security System +// +// https://learn.microsoft.com/en-us/windows/win32/secauthz/access-mask +// +// https://learn.microsoft.com/en-us/windows/win32/secauthz/generic-access-rights +// +// https://learn.microsoft.com/en-us/windows/win32/fileio/file-access-rights-constants +type AccessMask = windows.ACCESS_MASK + +//nolint:revive // SNAKE_CASE is not idiomatic in Go, but aligned with Win32 API. +const ( + // Not actually any. 
+ // + // For CreateFile: "query certain metadata such as file, directory, or device attributes without accessing that file or device" + // https://learn.microsoft.com/en-us/windows/win32/api/fileapi/nf-fileapi-createfilew#parameters + FILE_ANY_ACCESS AccessMask = 0 + + // Specific Object Access + // from ntioapi.h + + FILE_READ_DATA AccessMask = (0x0001) // file & pipe + FILE_LIST_DIRECTORY AccessMask = (0x0001) // directory + + FILE_WRITE_DATA AccessMask = (0x0002) // file & pipe + FILE_ADD_FILE AccessMask = (0x0002) // directory + + FILE_APPEND_DATA AccessMask = (0x0004) // file + FILE_ADD_SUBDIRECTORY AccessMask = (0x0004) // directory + FILE_CREATE_PIPE_INSTANCE AccessMask = (0x0004) // named pipe + + FILE_READ_EA AccessMask = (0x0008) // file & directory + FILE_READ_PROPERTIES AccessMask = FILE_READ_EA + + FILE_WRITE_EA AccessMask = (0x0010) // file & directory + FILE_WRITE_PROPERTIES AccessMask = FILE_WRITE_EA + + FILE_EXECUTE AccessMask = (0x0020) // file + FILE_TRAVERSE AccessMask = (0x0020) // directory + + FILE_DELETE_CHILD AccessMask = (0x0040) // directory + + FILE_READ_ATTRIBUTES AccessMask = (0x0080) // all + + FILE_WRITE_ATTRIBUTES AccessMask = (0x0100) // all + + FILE_ALL_ACCESS AccessMask = (STANDARD_RIGHTS_REQUIRED | SYNCHRONIZE | 0x1FF) + FILE_GENERIC_READ AccessMask = (STANDARD_RIGHTS_READ | FILE_READ_DATA | FILE_READ_ATTRIBUTES | FILE_READ_EA | SYNCHRONIZE) + FILE_GENERIC_WRITE AccessMask = (STANDARD_RIGHTS_WRITE | FILE_WRITE_DATA | FILE_WRITE_ATTRIBUTES | FILE_WRITE_EA | FILE_APPEND_DATA | SYNCHRONIZE) + FILE_GENERIC_EXECUTE AccessMask = (STANDARD_RIGHTS_EXECUTE | FILE_READ_ATTRIBUTES | FILE_EXECUTE | SYNCHRONIZE) + + SPECIFIC_RIGHTS_ALL AccessMask = 0x0000FFFF + + // Standard Access + // from ntseapi.h + + DELETE AccessMask = 0x0001_0000 + READ_CONTROL AccessMask = 0x0002_0000 + WRITE_DAC AccessMask = 0x0004_0000 + WRITE_OWNER AccessMask = 0x0008_0000 + SYNCHRONIZE AccessMask = 0x0010_0000 + + STANDARD_RIGHTS_REQUIRED AccessMask = 0x000F_0000 + + STANDARD_RIGHTS_READ AccessMask = READ_CONTROL + STANDARD_RIGHTS_WRITE AccessMask = READ_CONTROL + STANDARD_RIGHTS_EXECUTE AccessMask = READ_CONTROL + + STANDARD_RIGHTS_ALL AccessMask = 0x001F_0000 +) + +type FileShareMode uint32 + +//nolint:revive // SNAKE_CASE is not idiomatic in Go, but aligned with Win32 API. +const ( + FILE_SHARE_NONE FileShareMode = 0x00 + FILE_SHARE_READ FileShareMode = 0x01 + FILE_SHARE_WRITE FileShareMode = 0x02 + FILE_SHARE_DELETE FileShareMode = 0x04 + FILE_SHARE_VALID_FLAGS FileShareMode = 0x07 +) + +type FileCreationDisposition uint32 + +//nolint:revive // SNAKE_CASE is not idiomatic in Go, but aligned with Win32 API. +const ( + // from winbase.h + + CREATE_NEW FileCreationDisposition = 0x01 + CREATE_ALWAYS FileCreationDisposition = 0x02 + OPEN_EXISTING FileCreationDisposition = 0x03 + OPEN_ALWAYS FileCreationDisposition = 0x04 + TRUNCATE_EXISTING FileCreationDisposition = 0x05 +) + +// CreateFile and co. take flags or attributes together as one parameter. +// Define alias until we can use generics to allow both + +// https://learn.microsoft.com/en-us/windows/win32/fileio/file-attribute-constants +type FileFlagOrAttribute uint32 + +//nolint:revive // SNAKE_CASE is not idiomatic in Go, but aligned with Win32 API. 
+const ( // from winnt.h + FILE_FLAG_WRITE_THROUGH FileFlagOrAttribute = 0x8000_0000 + FILE_FLAG_OVERLAPPED FileFlagOrAttribute = 0x4000_0000 + FILE_FLAG_NO_BUFFERING FileFlagOrAttribute = 0x2000_0000 + FILE_FLAG_RANDOM_ACCESS FileFlagOrAttribute = 0x1000_0000 + FILE_FLAG_SEQUENTIAL_SCAN FileFlagOrAttribute = 0x0800_0000 + FILE_FLAG_DELETE_ON_CLOSE FileFlagOrAttribute = 0x0400_0000 + FILE_FLAG_BACKUP_SEMANTICS FileFlagOrAttribute = 0x0200_0000 + FILE_FLAG_POSIX_SEMANTICS FileFlagOrAttribute = 0x0100_0000 + FILE_FLAG_OPEN_REPARSE_POINT FileFlagOrAttribute = 0x0020_0000 + FILE_FLAG_OPEN_NO_RECALL FileFlagOrAttribute = 0x0010_0000 + FILE_FLAG_FIRST_PIPE_INSTANCE FileFlagOrAttribute = 0x0008_0000 +) + +type FileSQSFlag = FileFlagOrAttribute + +//nolint:revive // SNAKE_CASE is not idiomatic in Go, but aligned with Win32 API. +const ( // from winbase.h + SECURITY_ANONYMOUS FileSQSFlag = FileSQSFlag(SecurityAnonymous << 16) + SECURITY_IDENTIFICATION FileSQSFlag = FileSQSFlag(SecurityIdentification << 16) + SECURITY_IMPERSONATION FileSQSFlag = FileSQSFlag(SecurityImpersonation << 16) + SECURITY_DELEGATION FileSQSFlag = FileSQSFlag(SecurityDelegation << 16) + + SECURITY_SQOS_PRESENT FileSQSFlag = 0x00100000 + SECURITY_VALID_SQOS_FLAGS FileSQSFlag = 0x001F0000 +) + +// GetFinalPathNameByHandle flags +// +// https://learn.microsoft.com/en-us/windows/win32/api/fileapi/nf-fileapi-getfinalpathnamebyhandlew#parameters +type GetFinalPathFlag uint32 + +//nolint:revive // SNAKE_CASE is not idiomatic in Go, but aligned with Win32 API. +const ( + GetFinalPathDefaultFlag GetFinalPathFlag = 0x0 + + FILE_NAME_NORMALIZED GetFinalPathFlag = 0x0 + FILE_NAME_OPENED GetFinalPathFlag = 0x8 + + VOLUME_NAME_DOS GetFinalPathFlag = 0x0 + VOLUME_NAME_GUID GetFinalPathFlag = 0x1 + VOLUME_NAME_NT GetFinalPathFlag = 0x2 + VOLUME_NAME_NONE GetFinalPathFlag = 0x4 +) + +// GetFinalPathNameByHandle facilitates calling the Windows API GetFinalPathNameByHandle +// with the given handle and flags. It transparently takes care of creating a buffer of the +// correct size for the call. +// +// https://learn.microsoft.com/en-us/windows/win32/api/fileapi/nf-fileapi-getfinalpathnamebyhandlew +func GetFinalPathNameByHandle(h windows.Handle, flags GetFinalPathFlag) (string, error) { + b := stringbuffer.NewWString() + //TODO: can loop infinitely if Win32 keeps returning the same (or a larger) n? + for { + n, err := windows.GetFinalPathNameByHandle(h, b.Pointer(), b.Cap(), uint32(flags)) + if err != nil { + return "", err + } + // If the buffer wasn't large enough, n will be the total size needed (including null terminator). + // Resize and try again. + if n > b.Cap() { + b.ResizeTo(n) + continue + } + // If the buffer is large enough, n will be the size not including the null terminator. + // Convert to a Go string and return. 
+ return b.String(), nil + } +} diff --git a/vendor/github.com/Microsoft/go-winio/internal/fs/security.go b/vendor/github.com/Microsoft/go-winio/internal/fs/security.go new file mode 100644 index 000000000..81760ac67 --- /dev/null +++ b/vendor/github.com/Microsoft/go-winio/internal/fs/security.go @@ -0,0 +1,12 @@ +package fs + +// https://learn.microsoft.com/en-us/windows/win32/api/winnt/ne-winnt-security_impersonation_level +type SecurityImpersonationLevel int32 // C default enums underlying type is `int`, which is Go `int32` + +// Impersonation levels +const ( + SecurityAnonymous SecurityImpersonationLevel = 0 + SecurityIdentification SecurityImpersonationLevel = 1 + SecurityImpersonation SecurityImpersonationLevel = 2 + SecurityDelegation SecurityImpersonationLevel = 3 +) diff --git a/vendor/github.com/Microsoft/go-winio/internal/fs/zsyscall_windows.go b/vendor/github.com/Microsoft/go-winio/internal/fs/zsyscall_windows.go new file mode 100644 index 000000000..e2f7bb24e --- /dev/null +++ b/vendor/github.com/Microsoft/go-winio/internal/fs/zsyscall_windows.go @@ -0,0 +1,64 @@ +//go:build windows + +// Code generated by 'go generate' using "github.com/Microsoft/go-winio/tools/mkwinsyscall"; DO NOT EDIT. + +package fs + +import ( + "syscall" + "unsafe" + + "golang.org/x/sys/windows" +) + +var _ unsafe.Pointer + +// Do the interface allocations only once for common +// Errno values. +const ( + errnoERROR_IO_PENDING = 997 +) + +var ( + errERROR_IO_PENDING error = syscall.Errno(errnoERROR_IO_PENDING) + errERROR_EINVAL error = syscall.EINVAL +) + +// errnoErr returns common boxed Errno values, to prevent +// allocations at runtime. +func errnoErr(e syscall.Errno) error { + switch e { + case 0: + return errERROR_EINVAL + case errnoERROR_IO_PENDING: + return errERROR_IO_PENDING + } + // TODO: add more here, after collecting data on the common + // error values see on Windows. (perhaps when running + // all.bat?) 
+ return e +} + +var ( + modkernel32 = windows.NewLazySystemDLL("kernel32.dll") + + procCreateFileW = modkernel32.NewProc("CreateFileW") +) + +func CreateFile(name string, access AccessMask, mode FileShareMode, sa *syscall.SecurityAttributes, createmode FileCreationDisposition, attrs FileFlagOrAttribute, templatefile windows.Handle) (handle windows.Handle, err error) { + var _p0 *uint16 + _p0, err = syscall.UTF16PtrFromString(name) + if err != nil { + return + } + return _CreateFile(_p0, access, mode, sa, createmode, attrs, templatefile) +} + +func _CreateFile(name *uint16, access AccessMask, mode FileShareMode, sa *syscall.SecurityAttributes, createmode FileCreationDisposition, attrs FileFlagOrAttribute, templatefile windows.Handle) (handle windows.Handle, err error) { + r0, _, e1 := syscall.Syscall9(procCreateFileW.Addr(), 7, uintptr(unsafe.Pointer(name)), uintptr(access), uintptr(mode), uintptr(unsafe.Pointer(sa)), uintptr(createmode), uintptr(attrs), uintptr(templatefile), 0, 0) + handle = windows.Handle(r0) + if handle == windows.InvalidHandle { + err = errnoErr(e1) + } + return +} diff --git a/vendor/github.com/Microsoft/go-winio/internal/socket/socket.go b/vendor/github.com/Microsoft/go-winio/internal/socket/socket.go index 39e8c05f8..aeb7b7250 100644 --- a/vendor/github.com/Microsoft/go-winio/internal/socket/socket.go +++ b/vendor/github.com/Microsoft/go-winio/internal/socket/socket.go @@ -100,8 +100,8 @@ func (f *runtimeFunc) Load() error { (*byte)(unsafe.Pointer(&f.addr)), uint32(unsafe.Sizeof(f.addr)), &n, - nil, //overlapped - 0, //completionRoutine + nil, // overlapped + 0, // completionRoutine ) }) return f.err diff --git a/vendor/github.com/Microsoft/go-winio/internal/stringbuffer/wstring.go b/vendor/github.com/Microsoft/go-winio/internal/stringbuffer/wstring.go new file mode 100644 index 000000000..7ad505702 --- /dev/null +++ b/vendor/github.com/Microsoft/go-winio/internal/stringbuffer/wstring.go @@ -0,0 +1,132 @@ +package stringbuffer + +import ( + "sync" + "unicode/utf16" +) + +// TODO: worth exporting and using in mkwinsyscall? + +// MinWStringCap is the buffer size in the pool, chosen somewhat arbitrarily to accommodate +// large path strings: +// MAX_PATH (260) + size of volume GUID prefix (49) + null terminator = 310. +const MinWStringCap = 310 + +// use *[]uint16 since []uint16 creates an extra allocation where the slice header +// is copied to heap and then referenced via pointer in the interface header that sync.Pool +// stores. +var pathPool = sync.Pool{ // if go1.18+ adds Pool[T], use that to store []uint16 directly + New: func() interface{} { + b := make([]uint16, MinWStringCap) + return &b + }, +} + +func newBuffer() []uint16 { return *(pathPool.Get().(*[]uint16)) } + +// freeBuffer copies the slice header data, and puts a pointer to that in the pool. +// This avoids taking a pointer to the slice header in WString, which can be set to nil. +func freeBuffer(b []uint16) { pathPool.Put(&b) } + +// WString is a wide string buffer ([]uint16) meant for storing UTF-16 encoded strings +// for interacting with Win32 APIs. +// Sizes are specified as uint32 and not int. +// +// It is not thread safe. +type WString struct { + // type-def allows casting to []uint16 directly, use struct to prevent that and allow adding fields in the future. + + // raw buffer + b []uint16 +} + +// NewWString returns a [WString] allocated from a shared pool with an +// initial capacity of at least [MinWStringCap]. 
+// Since the buffer may have been previously used, its contents are not guaranteed to be empty. +// +// The buffer should be freed via [WString.Free]. +func NewWString() *WString { + return &WString{ + b: newBuffer(), + } +} + +func (b *WString) Free() { + if b.empty() { + return + } + freeBuffer(b.b) + b.b = nil +} + +// ResizeTo grows the buffer to at least c and returns the new capacity, freeing the +// previous buffer back into the pool. +func (b *WString) ResizeTo(c uint32) uint32 { + // already sufficient (or c is 0) + if c <= b.Cap() { + return b.Cap() + } + + if c <= MinWStringCap { + c = MinWStringCap + } + // allocate at least double the buffer size, as is done in [bytes.Buffer] and other places + if c <= 2*b.Cap() { + c = 2 * b.Cap() + } + + b2 := make([]uint16, c) + if !b.empty() { + copy(b2, b.b) + freeBuffer(b.b) + } + b.b = b2 + return c +} + +// Buffer returns the underlying []uint16 buffer. +func (b *WString) Buffer() []uint16 { + if b.empty() { + return nil + } + return b.b +} + +// Pointer returns a pointer to the first uint16 in the buffer. +// If [WString.Free] has already been called, the pointer will be nil. +func (b *WString) Pointer() *uint16 { + if b.empty() { + return nil + } + return &b.b[0] +} + +// String returns the UTF-8 encoding of the UTF-16 string in the buffer. +// +// It assumes that the data is null-terminated. +func (b *WString) String() string { + // Using [windows.UTF16ToString] would require importing "golang.org/x/sys/windows" + // and would make this code Windows-only, which makes no sense. + // So copy UTF16ToString code into here. + // If other windows-specific code is added, switch to [windows.UTF16ToString] + + s := b.b + for i, v := range s { + if v == 0 { + s = s[:i] + break + } + } + return string(utf16.Decode(s)) +} + +// Cap returns the underlying buffer capacity. 
+func (b *WString) Cap() uint32 { + if b.empty() { + return 0 + } + return b.cap() +} + +func (b *WString) cap() uint32 { return uint32(cap(b.b)) } +func (b *WString) empty() bool { return b == nil || b.cap() == 0 } diff --git a/vendor/github.com/Microsoft/go-winio/pipe.go b/vendor/github.com/Microsoft/go-winio/pipe.go index ca6e38fc0..25cc81103 100644 --- a/vendor/github.com/Microsoft/go-winio/pipe.go +++ b/vendor/github.com/Microsoft/go-winio/pipe.go @@ -16,11 +16,12 @@ import ( "unsafe" "golang.org/x/sys/windows" + + "github.com/Microsoft/go-winio/internal/fs" ) //sys connectNamedPipe(pipe syscall.Handle, o *syscall.Overlapped) (err error) = ConnectNamedPipe //sys createNamedPipe(name string, flags uint32, pipeMode uint32, maxInstances uint32, outSize uint32, inSize uint32, defaultTimeout uint32, sa *syscall.SecurityAttributes) (handle syscall.Handle, err error) [failretval==syscall.InvalidHandle] = CreateNamedPipeW -//sys createFile(name string, access uint32, mode uint32, sa *syscall.SecurityAttributes, createmode uint32, attrs uint32, templatefile syscall.Handle) (handle syscall.Handle, err error) [failretval==syscall.InvalidHandle] = CreateFileW //sys getNamedPipeInfo(pipe syscall.Handle, flags *uint32, outSize *uint32, inSize *uint32, maxInstances *uint32) (err error) = GetNamedPipeInfo //sys getNamedPipeHandleState(pipe syscall.Handle, state *uint32, curInstances *uint32, maxCollectionCount *uint32, collectDataTimeout *uint32, userName *uint16, maxUserNameSize uint32) (err error) = GetNamedPipeHandleStateW //sys localAlloc(uFlags uint32, length uint32) (ptr uintptr) = LocalAlloc @@ -163,19 +164,21 @@ func (s pipeAddress) String() string { } // tryDialPipe attempts to dial the pipe at `path` until `ctx` cancellation or timeout. -func tryDialPipe(ctx context.Context, path *string, access uint32) (syscall.Handle, error) { +func tryDialPipe(ctx context.Context, path *string, access fs.AccessMask) (syscall.Handle, error) { for { select { case <-ctx.Done(): return syscall.Handle(0), ctx.Err() default: - h, err := createFile(*path, + wh, err := fs.CreateFile(*path, access, - 0, - nil, - syscall.OPEN_EXISTING, - windows.FILE_FLAG_OVERLAPPED|windows.SECURITY_SQOS_PRESENT|windows.SECURITY_ANONYMOUS, - 0) + 0, // mode + nil, // security attributes + fs.OPEN_EXISTING, + fs.FILE_FLAG_OVERLAPPED|fs.SECURITY_SQOS_PRESENT|fs.SECURITY_ANONYMOUS, + 0, // template file handle + ) + h := syscall.Handle(wh) if err == nil { return h, nil } @@ -219,7 +222,7 @@ func DialPipeContext(ctx context.Context, path string) (net.Conn, error) { func DialPipeAccess(ctx context.Context, path string, access uint32) (net.Conn, error) { var err error var h syscall.Handle - h, err = tryDialPipe(ctx, &path, access) + h, err = tryDialPipe(ctx, &path, fs.AccessMask(access)) if err != nil { return nil, err } @@ -279,6 +282,7 @@ func makeServerPipeHandle(path string, sd []byte, c *PipeConfig, first bool) (sy } defer localFree(ntPath.Buffer) oa.ObjectName = &ntPath + oa.Attributes = windows.OBJ_CASE_INSENSITIVE // The security descriptor is only needed for the first pipe. 
if first { diff --git a/vendor/github.com/Microsoft/go-winio/tools/mkwinsyscall/mkwinsyscall.go b/vendor/github.com/Microsoft/go-winio/tools/mkwinsyscall/mkwinsyscall.go index e72be3138..20d9e3d27 100644 --- a/vendor/github.com/Microsoft/go-winio/tools/mkwinsyscall/mkwinsyscall.go +++ b/vendor/github.com/Microsoft/go-winio/tools/mkwinsyscall/mkwinsyscall.go @@ -477,15 +477,14 @@ func newFn(s string) (*Fn, error) { return nil, errors.New("Could not extract dll name from \"" + f.src + "\"") } s = trim(s[1:]) - a := strings.Split(s, ".") - switch len(a) { - case 1: - f.dllfuncname = a[0] - case 2: - f.dllname = a[0] - f.dllfuncname = a[1] - default: - return nil, errors.New("Could not extract dll name from \"" + f.src + "\"") + if i := strings.LastIndex(s, "."); i >= 0 { + f.dllname = s[:i] + f.dllfuncname = s[i+1:] + } else { + f.dllfuncname = s + } + if f.dllfuncname == "" { + return nil, fmt.Errorf("function name is not specified in %q", s) } if n := f.dllfuncname; endsIn(n, '?') { f.dllfuncname = n[:len(n)-1] @@ -502,7 +501,23 @@ func (f *Fn) DLLName() string { return f.dllname } -// DLLName returns DLL function name for function f. +// DLLVar returns a valid Go identifier that represents DLLName. +func (f *Fn) DLLVar() string { + id := strings.Map(func(r rune) rune { + switch r { + case '.', '-': + return '_' + default: + return r + } + }, f.DLLName()) + if !token.IsIdentifier(id) { + panic(fmt.Errorf("could not create Go identifier for DLLName %q", f.DLLName())) + } + return id +} + +// DLLFuncName returns DLL function name for function f. func (f *Fn) DLLFuncName() string { if f.dllfuncname == "" { return f.Name @@ -648,6 +663,13 @@ func (f *Fn) HelperName() string { return "_" + f.Name } +// DLL is a DLL's filename and a string that is valid in a Go identifier that should be used when +// naming a variable that refers to the DLL. +type DLL struct { + Name string + Var string +} + // Source files and functions. type Source struct { Funcs []*Fn @@ -697,18 +719,20 @@ func ParseFiles(fs []string) (*Source, error) { } // DLLs return dll names for a source set src. 
-func (src *Source) DLLs() []string { +func (src *Source) DLLs() []DLL { uniq := make(map[string]bool) - r := make([]string, 0) + r := make([]DLL, 0) for _, f := range src.Funcs { - name := f.DLLName() - if _, found := uniq[name]; !found { - uniq[name] = true - r = append(r, name) + id := f.DLLVar() + if _, found := uniq[id]; !found { + uniq[id] = true + r = append(r, DLL{f.DLLName(), id}) } } if *sortdecls { - sort.Strings(r) + sort.Slice(r, func(i, j int) bool { + return r[i].Var < r[j].Var + }) } return r } @@ -878,6 +902,22 @@ func (src *Source) Generate(w io.Writer) error { return nil } +func writeTempSourceFile(data []byte) (string, error) { + f, err := os.CreateTemp("", "mkwinsyscall-generated-*.go") + if err != nil { + return "", err + } + _, err = f.Write(data) + if closeErr := f.Close(); err == nil { + err = closeErr + } + if err != nil { + os.Remove(f.Name()) // best effort + return "", err + } + return f.Name(), nil +} + func usage() { fmt.Fprintf(os.Stderr, "usage: mkwinsyscall [flags] [path ...]\n") flag.PrintDefaults() @@ -904,7 +944,12 @@ func main() { data, err := format.Source(buf.Bytes()) if err != nil { - log.Fatal(err) + log.Printf("failed to format source: %v", err) + f, err := writeTempSourceFile(buf.Bytes()) + if err != nil { + log.Fatalf("failed to write unformatted source to file: %v", err) + } + log.Fatalf("for diagnosis, wrote unformatted source to %v", f) } if *filename == "" { _, err = os.Stdout.Write(data) @@ -970,10 +1015,10 @@ var ( {{/* help functions */}} -{{define "dlls"}}{{range .DLLs}} mod{{.}} = {{newlazydll .}} +{{define "dlls"}}{{range .DLLs}} mod{{.Var}} = {{newlazydll .Name}} {{end}}{{end}} -{{define "funcnames"}}{{range .DLLFuncNames}} proc{{.DLLFuncName}} = mod{{.DLLName}}.NewProc("{{.DLLFuncName}}") +{{define "funcnames"}}{{range .DLLFuncNames}} proc{{.DLLFuncName}} = mod{{.DLLVar}}.NewProc("{{.DLLFuncName}}") {{end}}{{end}} {{define "helperbody"}} diff --git a/vendor/github.com/Microsoft/go-winio/zsyscall_windows.go b/vendor/github.com/Microsoft/go-winio/zsyscall_windows.go index 83f45a135..469b16f63 100644 --- a/vendor/github.com/Microsoft/go-winio/zsyscall_windows.go +++ b/vendor/github.com/Microsoft/go-winio/zsyscall_windows.go @@ -63,7 +63,6 @@ var ( procBackupWrite = modkernel32.NewProc("BackupWrite") procCancelIoEx = modkernel32.NewProc("CancelIoEx") procConnectNamedPipe = modkernel32.NewProc("ConnectNamedPipe") - procCreateFileW = modkernel32.NewProc("CreateFileW") procCreateIoCompletionPort = modkernel32.NewProc("CreateIoCompletionPort") procCreateNamedPipeW = modkernel32.NewProc("CreateNamedPipeW") procGetCurrentThread = modkernel32.NewProc("GetCurrentThread") @@ -305,24 +304,6 @@ func connectNamedPipe(pipe syscall.Handle, o *syscall.Overlapped) (err error) { return } -func createFile(name string, access uint32, mode uint32, sa *syscall.SecurityAttributes, createmode uint32, attrs uint32, templatefile syscall.Handle) (handle syscall.Handle, err error) { - var _p0 *uint16 - _p0, err = syscall.UTF16PtrFromString(name) - if err != nil { - return - } - return _createFile(_p0, access, mode, sa, createmode, attrs, templatefile) -} - -func _createFile(name *uint16, access uint32, mode uint32, sa *syscall.SecurityAttributes, createmode uint32, attrs uint32, templatefile syscall.Handle) (handle syscall.Handle, err error) { - r0, _, e1 := syscall.Syscall9(procCreateFileW.Addr(), 7, uintptr(unsafe.Pointer(name)), uintptr(access), uintptr(mode), uintptr(unsafe.Pointer(sa)), uintptr(createmode), uintptr(attrs), uintptr(templatefile), 0, 0) - 
handle = syscall.Handle(r0) - if handle == syscall.InvalidHandle { - err = errnoErr(e1) - } - return -} - func createIoCompletionPort(file syscall.Handle, port syscall.Handle, key uintptr, threadCount uint32) (newport syscall.Handle, err error) { r0, _, e1 := syscall.Syscall6(procCreateIoCompletionPort.Addr(), 4, uintptr(file), uintptr(port), uintptr(key), uintptr(threadCount), 0, 0) newport = syscall.Handle(r0) diff --git a/vendor/github.com/Microsoft/hcsshim/Makefile b/vendor/github.com/Microsoft/hcsshim/Makefile index 742c76d84..d8eb30b86 100644 --- a/vendor/github.com/Microsoft/hcsshim/Makefile +++ b/vendor/github.com/Microsoft/hcsshim/Makefile @@ -94,23 +94,9 @@ out/delta.tar.gz: bin/init bin/vsockexec bin/cmd/gcs bin/cmd/gcstools bin/cmd/ho tar -zcf $@ -C rootfs . rm -rf rootfs --include deps/cmd/gcs.gomake --include deps/cmd/gcstools.gomake --include deps/cmd/hooks/wait-paths.gomake --include deps/cmd/tar2ext4.gomake --include deps/internal/tools/snp-report.gomake - -# Implicit rule for includes that define Go targets. -%.gomake: $(SRCROOT)/Makefile +bin/cmd/gcs bin/cmd/gcstools bin/cmd/hooks/wait-paths bin/cmd/tar2ext4 bin/internal/tools/snp-report: @mkdir -p $(dir $@) - @/bin/echo $(@:deps/%.gomake=bin/%): $(SRCROOT)/hack/gomakedeps.sh > $@.new - @/bin/echo -e '\t@mkdir -p $$(dir $$@) $(dir $@)' >> $@.new - @/bin/echo -e '\t$$(GO_BUILD) -o $$@.new $$(SRCROOT)/$$(@:bin/%=%)' >> $@.new - @/bin/echo -e '\tGO="$(GO)" $$(SRCROOT)/hack/gomakedeps.sh $$@ $$(SRCROOT)/$$(@:bin/%=%) $$(GO_FLAGS) $$(GO_FLAGS_EXTRA) > $(@:%.gomake=%.godeps).new' >> $@.new - @/bin/echo -e '\tmv $(@:%.gomake=%.godeps).new $(@:%.gomake=%.godeps)' >> $@.new - @/bin/echo -e '\tmv $$@.new $$@' >> $@.new - @/bin/echo -e '-include $(@:%.gomake=%.godeps)' >> $@.new - mv $@.new $@ + GOOS=linux $(GO_BUILD) -o $@ $(SRCROOT)/$(@:bin/%=%) bin/vsockexec: vsockexec/vsockexec.o vsockexec/vsock.o @mkdir -p bin diff --git a/vendor/github.com/Microsoft/hcsshim/internal/log/format.go b/vendor/github.com/Microsoft/hcsshim/internal/log/format.go new file mode 100644 index 000000000..4b6500333 --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/internal/log/format.go @@ -0,0 +1,85 @@ +package log + +import ( + "bytes" + "context" + "encoding/json" + "fmt" + "net" + "reflect" + "time" + + "github.com/containerd/containerd/log" +) + +const TimeFormat = log.RFC3339NanoFixed + +func FormatTime(t time.Time) string { + return t.Format(TimeFormat) +} + +// DurationFormat formats a [time.Duration] log entry. +// +// A nil value signals an error with the formatting. +type DurationFormat func(time.Duration) interface{} + +func DurationFormatString(d time.Duration) interface{} { return d.String() } +func DurationFormatSeconds(d time.Duration) interface{} { return d.Seconds() } +func DurationFormatMilliseconds(d time.Duration) interface{} { return d.Milliseconds() } + +// FormatIO formats net.Conn and other types that have an `Addr()` or `Name()`. +// +// See FormatEnabled for more information. 
+func FormatIO(ctx context.Context, v interface{}) string { + m := make(map[string]string) + m["type"] = reflect.TypeOf(v).String() + + switch t := v.(type) { + case net.Conn: + m["localAddress"] = formatAddr(t.LocalAddr()) + m["remoteAddress"] = formatAddr(t.RemoteAddr()) + case interface{ Addr() net.Addr }: + m["address"] = formatAddr(t.Addr()) + default: + return Format(ctx, t) + } + + return Format(ctx, m) +} + +func formatAddr(a net.Addr) string { + return a.Network() + "://" + a.String() +} + +// Format formats an object into a JSON string, without any indentation or +// HTML escapes. +// Context is used to output a log warning if the conversion fails. +// +// This is intended primarily for `trace.StringAttribute()` +func Format(ctx context.Context, v interface{}) string { + b, err := encode(v) + if err != nil { + G(ctx).WithError(err).Warning("could not format value") + return "" + } + + return string(b) +} + +func encode(v interface{}) ([]byte, error) { + return encodeBuffer(&bytes.Buffer{}, v) +} + +func encodeBuffer(buf *bytes.Buffer, v interface{}) ([]byte, error) { + enc := json.NewEncoder(buf) + enc.SetEscapeHTML(false) + enc.SetIndent("", "") + + if err := enc.Encode(v); err != nil { + err = fmt.Errorf("could not marshal %T to JSON for logging: %w", v, err) + return nil, err + } + + // encoder.Encode appends a newline to the end + return bytes.TrimSpace(buf.Bytes()), nil +} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/log/hook.go b/vendor/github.com/Microsoft/hcsshim/internal/log/hook.go index 8f8940592..94c6d0918 100644 --- a/vendor/github.com/Microsoft/hcsshim/internal/log/hook.go +++ b/vendor/github.com/Microsoft/hcsshim/internal/log/hook.go @@ -1,23 +1,58 @@ package log import ( + "bytes" + "reflect" + "time" + "github.com/Microsoft/hcsshim/internal/logfields" + "github.com/containerd/containerd/log" "github.com/sirupsen/logrus" "go.opencensus.io/trace" ) -// Hook serves to intercept and format `logrus.Entry`s before they are passed -// to the ETW hook. +const nullString = "null" + +// Hook intercepts and formats a [logrus.Entry] before it is logged. // -// The containerd shim discards the (formatted) logrus output, and outputs only via ETW. -// The Linux GCS outputs logrus entries over stdout, which is consumed by the shim and -// then re-output via the ETW hook. -type Hook struct{} +// The shim either outputs the logs through an ETW hook, discarding the (formatted) output, +// or logs output to a pipe for logging binaries to consume. +// The Linux GCS outputs logrus entries over stdout, which is then consumed and re-output +// by the shim. +type Hook struct { + // EncodeAsJSON formats structs, maps, arrays, slices, and [bytes.Buffer] as JSON. + // Variables of [bytes.Buffer] will be converted to []byte. + // + // Default is false. + EncodeAsJSON bool + + // TimeFormat specifies the format for [time.Time] variables. + // An empty string disables formatting. + // When disabled, the fallback will be the JSON encoding, if enabled. + // + // Default is [github.com/containerd/containerd/log.RFC3339NanoFixed]. + TimeFormat string + + // DurationFormat converts [time.Duration] fields to an appropriate encoding. + // nil disables formatting. + // When disabled, the fallback will be the JSON encoding, if enabled. + // + // Default is [DurationFormatString], which appends a duration unit after the value. 
+ DurationFormat DurationFormat + + // AddSpanContext adds [logfields.TraceID] and [logfields.SpanID] fields to + // the entry from the span context stored in [logrus.Entry.Context], if it exists. + AddSpanContext bool +} var _ logrus.Hook = &Hook{} func NewHook() *Hook { - return &Hook{} + return &Hook{ + TimeFormat: log.RFC3339NanoFixed, + DurationFormat: DurationFormatString, + AddSpanContext: true, + } } func (h *Hook) Levels() []logrus.Level { @@ -25,14 +60,108 @@ func (h *Hook) Levels() []logrus.Level { } func (h *Hook) Fire(e *logrus.Entry) (err error) { + // JSON encode, if necessary, then add span information + h.encode(e) h.addSpanContext(e) return nil } +// encode loops through all the fields in the [logrus.Entry] and encodes them according to +// the settings in [Hook]. +// If [Hook.TimeFormat] is non-empty, it will be passed to [time.Time.Format] for +// fields of type [time.Time]. +// +// If [Hook.EncodeAsJSON] is true, then fields that are not numeric, boolean, strings, or +// errors will be encoded via [json.Marshal] (with HTML escaping disabled). +// Channel- and function-typed fields, as well as unsafe pointers, are left alone and not encoded. +// +// If [Hook.TimeFormat] and [Hook.DurationFormat] are empty and [Hook.EncodeAsJSON] is false, +// then this is a no-op. +func (h *Hook) encode(e *logrus.Entry) { + d := e.Data + + formatTime := h.TimeFormat != "" + formatDuration := h.DurationFormat != nil + if !(h.EncodeAsJSON || formatTime || formatDuration) { + return + } + + for k, v := range d { + // encode types with dedicated formatting options first + + if vv, ok := v.(time.Time); formatTime && ok { + d[k] = vv.Format(h.TimeFormat) + continue + } + + if vv, ok := v.(time.Duration); formatDuration && ok { + d[k] = h.DurationFormat(vv) + continue + } + + // general case JSON encoding + + if !h.EncodeAsJSON { + continue + } + + switch vv := v.(type) { + // built in types + // "json" marshals errors as "{}", so leave alone here + case bool, string, error, uintptr, + int8, int16, int32, int64, int, + uint8, uint32, uint64, uint, + float32, float64: + continue + + // Rather than setting d[k] = vv.String(), JSON encode []byte value, since it + // may be a binary payload and not representable as a string. + // `case bytes.Buffer,*bytes.Buffer:` resolves `vv` to `interface{}`, + // so cannot use `vv.Bytes`. + // Could move to below the `reflect.Indirect()` call below, but + // that would require additional type matching and dereferencing. + // Easier to keep these duplicate branches here. + case bytes.Buffer: + v = vv.Bytes() + case *bytes.Buffer: + v = vv.Bytes() + } + + // dereference pointer or interface variables + rv := reflect.Indirect(reflect.ValueOf(v)) + // check if `v` is a null pointer + if !rv.IsValid() { + d[k] = nullString + continue + } + + switch rv.Kind() { + case reflect.Map, reflect.Struct, reflect.Array, reflect.Slice: + default: + // Bool, [U]?Int*, Float*, Complex*, Uintptr, String: encoded as normal + // Chan, Func: not supported by json + // Interface, Pointer: dereferenced above + // UnsafePointer: not supported by json, not safe to de-reference; leave alone + continue + } + + b, err := encode(v) + if err != nil { + // Errors are written to stderr (ie, to `panic.log`) and stop the remaining + // hooks (ie, exporting to ETW) from firing. So add encoding errors to + // the entry data to be written out, but keep on processing. 
+ d[k+"-"+logrus.ErrorKey] = err.Error() + // keep the original `v` as the value, + continue + } + d[k] = string(b) + } +} + func (h *Hook) addSpanContext(e *logrus.Entry) { ctx := e.Context - if ctx == nil { + if !h.AddSpanContext || ctx == nil { return } span := trace.FromContext(ctx) diff --git a/vendor/github.com/Microsoft/hcsshim/internal/log/scrub.go b/vendor/github.com/Microsoft/hcsshim/internal/log/scrub.go index d51e0fd89..d1ef15096 100644 --- a/vendor/github.com/Microsoft/hcsshim/internal/log/scrub.go +++ b/vendor/github.com/Microsoft/hcsshim/internal/log/scrub.go @@ -4,7 +4,6 @@ import ( "bytes" "encoding/json" "errors" - "strings" "sync/atomic" hcsschema "github.com/Microsoft/hcsshim/internal/hcs/schema2" @@ -56,11 +55,11 @@ func ScrubProcessParameters(s string) (string, error) { } pp.Environment = map[string]string{_scrubbedReplacement: _scrubbedReplacement} - buf := bytes.NewBuffer(b[:0]) - if err := encode(buf, pp); err != nil { + b, err := encodeBuffer(bytes.NewBuffer(b[:0]), pp) + if err != nil { return "", err } - return strings.TrimSpace(buf.String()), nil + return string(b), nil } // ScrubBridgeCreate scrubs requests sent over the bridge of type @@ -150,21 +149,12 @@ func scrubBytes(b []byte, scrub scrubberFunc) ([]byte, error) { return nil, err } - buf := &bytes.Buffer{} - if err := encode(buf, m); err != nil { + b, err := encode(m) + if err != nil { return nil, err } - return bytes.TrimSpace(buf.Bytes()), nil -} - -func encode(buf *bytes.Buffer, v interface{}) error { - enc := json.NewEncoder(buf) - enc.SetEscapeHTML(false) - if err := enc.Encode(v); err != nil { - return err - } - return nil + return b, nil } func isRequestBase(m genMap) bool { diff --git a/vendor/github.com/Microsoft/hcsshim/internal/oc/errors.go b/vendor/github.com/Microsoft/hcsshim/internal/oc/errors.go new file mode 100644 index 000000000..71df25b8d --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/internal/oc/errors.go @@ -0,0 +1,69 @@ +package oc + +import ( + "errors" + "io" + "net" + "os" + + "github.com/containerd/containerd/errdefs" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/status" +) + +// todo: break import cycle with "internal/hcs/errors.go" and reference errors defined there +// todo: add errors defined in "internal/guest/gcserror" (Hresult does not implement error) + +func toStatusCode(err error) codes.Code { + // checks if err implements GRPCStatus() *"google.golang.org/grpc/status".Status, + // wraps an error defined in "github.com/containerd/containerd/errdefs", or is a + // context timeout or cancelled error + if s, ok := status.FromError(errdefs.ToGRPC(err)); ok { + return s.Code() + } + + switch { + // case isAny(err): + // return codes.Cancelled + case isAny(err, os.ErrInvalid): + return codes.InvalidArgument + case isAny(err, os.ErrDeadlineExceeded): + return codes.DeadlineExceeded + case isAny(err, os.ErrNotExist): + return codes.NotFound + case isAny(err, os.ErrExist): + return codes.AlreadyExists + case isAny(err, os.ErrPermission): + return codes.PermissionDenied + // case isAny(err): + // return codes.ResourceExhausted + case isAny(err, os.ErrClosed, net.ErrClosed, io.ErrClosedPipe, io.ErrShortBuffer): + return codes.FailedPrecondition + // case isAny(err): + // return codes.Aborted + // case isAny(err): + // return codes.OutOfRange + // case isAny(err): + // return codes.Unimplemented + case isAny(err, io.ErrNoProgress): + return codes.Internal + // case isAny(err): + // return codes.Unavailable + case isAny(err, io.ErrShortWrite, 
io.ErrUnexpectedEOF): + return codes.DataLoss + // case isAny(err): + // return codes.Unauthenticated + default: + return codes.Unknown + } +} + +// isAny returns true if errors.Is is true for any of the provided errors, errs. +func isAny(err error, errs ...error) bool { + for _, e := range errs { + if errors.Is(err, e) { + return true + } + } + return false +} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/oc/exporter.go b/vendor/github.com/Microsoft/hcsshim/internal/oc/exporter.go index f428bdaf7..28f8f43a9 100644 --- a/vendor/github.com/Microsoft/hcsshim/internal/oc/exporter.go +++ b/vendor/github.com/Microsoft/hcsshim/internal/oc/exporter.go @@ -3,19 +3,26 @@ package oc import ( "github.com/sirupsen/logrus" "go.opencensus.io/trace" + "google.golang.org/grpc/codes" + + "github.com/Microsoft/hcsshim/internal/log" + "github.com/Microsoft/hcsshim/internal/logfields" ) -var _ = (trace.Exporter)(&LogrusExporter{}) +const spanMessage = "Span" + +var _errorCodeKey = logrus.ErrorKey + "Code" // LogrusExporter is an OpenCensus `trace.Exporter` that exports // `trace.SpanData` to logrus output. -type LogrusExporter struct { -} +type LogrusExporter struct{} + +var _ trace.Exporter = &LogrusExporter{} // ExportSpan exports `s` based on the following rules: // -// 1. All output will contain `s.Attributes`, `s.TraceID`, `s.SpanID`, -// `s.ParentSpanID` for correlation +// 1. All output will contain `s.Attributes`, `s.SpanKind`, `s.TraceID`, +// `s.SpanID`, and `s.ParentSpanID` for correlation // // 2. Any calls to .Annotate will not be supported. // @@ -23,21 +30,57 @@ type LogrusExporter struct { // `s.Status.Code != 0` in which case it will be written at `logrus.ErrorLevel` // providing `s.Status.Message` as the error value. func (le *LogrusExporter) ExportSpan(s *trace.SpanData) { - // Combine all span annotations with traceID, spanID, parentSpanID - baseEntry := logrus.WithFields(logrus.Fields(s.Attributes)) - baseEntry.Data["traceID"] = s.TraceID.String() - baseEntry.Data["spanID"] = s.SpanID.String() - baseEntry.Data["parentSpanID"] = s.ParentSpanID.String() - baseEntry.Data["startTime"] = s.StartTime - baseEntry.Data["endTime"] = s.EndTime - baseEntry.Data["duration"] = s.EndTime.Sub(s.StartTime).String() - baseEntry.Data["name"] = s.Name - baseEntry.Time = s.StartTime + if s.DroppedAnnotationCount > 0 { + logrus.WithFields(logrus.Fields{ + "name": s.Name, + logfields.TraceID: s.TraceID.String(), + logfields.SpanID: s.SpanID.String(), + "dropped": s.DroppedAttributeCount, + "maxAttributes": len(s.Attributes), + }).Warning("span had dropped attributes") + } + + entry := log.L.Dup() + // Combine all span annotations with span data (eg, trace ID, span ID, parent span ID, + // error, status code) + // (OC) Span attributes are guaranteed to be strings, bools, or int64s, so we + // can skip overhead in entry.WithFields() and add them directly to entry.Data. 
+ // Preallocate ahead of time, since we should add, at most, 10 additional entries + data := make(logrus.Fields, len(entry.Data)+len(s.Attributes)+10) + + // Default log entry may have preexisting/application-wide data + for k, v := range entry.Data { + data[k] = v + } + for k, v := range s.Attributes { + data[k] = v + } + + data[logfields.Name] = s.Name + data[logfields.TraceID] = s.TraceID.String() + data[logfields.SpanID] = s.SpanID.String() + data[logfields.ParentSpanID] = s.ParentSpanID.String() + data[logfields.StartTime] = s.StartTime + data[logfields.EndTime] = s.EndTime + data[logfields.Duration] = s.EndTime.Sub(s.StartTime) + if sk := spanKindToString(s.SpanKind); sk != "" { + data["spanKind"] = sk + } level := logrus.InfoLevel if s.Status.Code != 0 { level = logrus.ErrorLevel - baseEntry.Data[logrus.ErrorKey] = s.Status.Message + + // don't overwrite existing "error" or "errorCode" attributes + if _, ok := data[logrus.ErrorKey]; !ok { + data[logrus.ErrorKey] = s.Status.Message + } + if _, ok := data[_errorCodeKey]; !ok { + data[_errorCodeKey] = codes.Code(s.Status.Code).String() + } } - baseEntry.Log(level, "Span") + + entry.Data = data + entry.Time = s.StartTime + entry.Log(level, spanMessage) } diff --git a/vendor/github.com/Microsoft/hcsshim/internal/oc/span.go b/vendor/github.com/Microsoft/hcsshim/internal/oc/span.go index 0e2b7e9bf..726078432 100644 --- a/vendor/github.com/Microsoft/hcsshim/internal/oc/span.go +++ b/vendor/github.com/Microsoft/hcsshim/internal/oc/span.go @@ -14,8 +14,7 @@ var DefaultSampler = trace.AlwaysSample() func SetSpanStatus(span *trace.Span, err error) { status := trace.Status{} if err != nil { - // TODO: JTERRY75 - Handle errors in a non-generic way - status.Code = trace.StatusCodeUnknown + status.Code = int32(toStatusCode(err)) status.Message = err.Error() } span.SetStatus(status) @@ -46,3 +45,14 @@ func update(ctx context.Context, s *trace.Span) (context.Context, *trace.Span) { var WithServerSpanKind = trace.WithSpanKind(trace.SpanKindServer) var WithClientSpanKind = trace.WithSpanKind(trace.SpanKindClient) + +func spanKindToString(sk int) string { + switch sk { + case trace.SpanKindClient: + return "client" + case trace.SpanKindServer: + return "server" + default: + return "" + } +} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/security/grantvmgroupaccess.go b/vendor/github.com/Microsoft/hcsshim/internal/security/grantvmgroupaccess.go index bfcc15769..7dfa1e594 100644 --- a/vendor/github.com/Microsoft/hcsshim/internal/security/grantvmgroupaccess.go +++ b/vendor/github.com/Microsoft/hcsshim/internal/security/grantvmgroupaccess.go @@ -23,20 +23,14 @@ type ( ) type explicitAccess struct { - //nolint:structcheck accessPermissions accessMask - //nolint:structcheck - accessMode accessMode - //nolint:structcheck - inheritance inheritMode - //nolint:structcheck - trustee trustee + accessMode accessMode + inheritance inheritMode + trustee trustee } type trustee struct { - //nolint:unused,structcheck - multipleTrustee *trustee - //nolint:unused,structcheck + multipleTrustee *trustee multipleTrusteeOperation int32 trusteeForm trusteeForm trusteeType trusteeType diff --git a/vendor/github.com/containers/common/libimage/filters.go b/vendor/github.com/containers/common/libimage/filters.go index 441011edd..ff50321b7 100644 --- a/vendor/github.com/containers/common/libimage/filters.go +++ b/vendor/github.com/containers/common/libimage/filters.go @@ -178,7 +178,7 @@ func (r *Runtime) compileImageFilters(ctx context.Context, options *ListImagesOp 
filter = filterManifest(ctx, manifest) case "reference": - filter = filterReferences(value) + filter = filterReferences(r, value) case "until": until, err := r.until(value) @@ -268,8 +268,15 @@ func filterManifest(ctx context.Context, value bool) filterFunc { } // filterReferences creates a reference filter for matching the specified value. -func filterReferences(value string) filterFunc { +func filterReferences(r *Runtime, value string) filterFunc { + lookedUp, _, _ := r.LookupImage(value, nil) return func(img *Image) (bool, error) { + if lookedUp != nil { + if lookedUp.ID() == img.ID() { + return true, nil + } + } + refs, err := img.NamesReferences() if err != nil { return false, err @@ -306,6 +313,7 @@ func filterReferences(value string) filterFunc { } } } + return false, nil } } diff --git a/vendor/github.com/containers/common/libimage/history.go b/vendor/github.com/containers/common/libimage/history.go index 46252df10..ad989b528 100644 --- a/vendor/github.com/containers/common/libimage/history.go +++ b/vendor/github.com/containers/common/libimage/history.go @@ -51,7 +51,6 @@ func (i *Image) History(ctx context.Context) ([]ImageHistory, error) { } if layer != nil { - history.Tags = layer.Names if !ociImage.History[x].EmptyLayer { history.Size = layer.UncompressedSize } @@ -64,8 +63,13 @@ func (i *Image) History(ctx context.Context) ([]ImageHistory, error) { history.ID = id usedIDs[id] = true } + for i := range node.images { + history.Tags = append(history.Tags, node.images[i].Names()...) + } } - if layer.Parent != "" && !ociImage.History[x].EmptyLayer { + if layer.Parent == "" { + layer = nil + } else if !ociImage.History[x].EmptyLayer { layer, err = i.runtime.store.Layer(layer.Parent) if err != nil { return nil, err diff --git a/vendor/github.com/containers/common/libimage/image.go b/vendor/github.com/containers/common/libimage/image.go index 9090f035a..da4ff8b7a 100644 --- a/vendor/github.com/containers/common/libimage/image.go +++ b/vendor/github.com/containers/common/libimage/image.go @@ -402,7 +402,7 @@ func (i *Image) removeRecursive(ctx context.Context, rmMap map[string]*RemoveIma // have a closer look at the errors. On top, image removal should be // tolerant toward corrupted images. handleError := func(err error) error { - if errors.Is(err, storage.ErrImageUnknown) || errors.Is(err, storage.ErrNotAnImage) || errors.Is(err, storage.ErrLayerUnknown) { + if ErrorIsImageUnknown(err) { // The image or layers of the image may already have been removed // in which case we consider the image to be removed. return nil @@ -424,14 +424,12 @@ func (i *Image) removeRecursive(ctx context.Context, rmMap map[string]*RemoveIma numNames := len(i.Names()) // NOTE: the `numNames == 1` check is not only a performance - // optimization but also preserves exiting Podman/Docker behaviour. + // optimization but also preserves existing Podman/Docker behaviour. // If image "foo" is used by a container and has only this tag/name, // an `rmi foo` will not untag "foo" but instead attempt to remove the // entire image. If there's a container using "foo", we should get an // error. 
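filterReferences now resolves the filter value through the runtime once, at filter-compile time, and the returned closure can short-circuit on an ID match before falling back to reference matching. A stripped-down sketch of that compile-once/apply-many shape (the image type and lookup function are stand-ins, not the libimage API):

package main

import "fmt"

// A stripped-down model of the filterReferences change above: resolve the
// user input once when the filter is compiled, then short-circuit on ID
// equality inside the per-image closure.
type image struct{ id string }

type filterFunc func(*image) (bool, error)

func filterReferences(lookup func(string) *image, value string) filterFunc {
	lookedUp := lookup(value) // may be nil; the filter still works without it
	return func(img *image) (bool, error) {
		if lookedUp != nil && lookedUp.id == img.id {
			return true, nil
		}
		// fall back to slower name/reference matching here
		return false, nil
	}
}

func main() {
	byName := map[string]*image{"alpine": {id: "d0e1"}}
	f := filterReferences(func(v string) *image { return byName[v] }, "alpine")
	ok, _ := f(&image{id: "d0e1"})
	fmt.Println(ok) // true
}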
- if referencedBy == "" || numNames == 1 { - // DO NOTHING, the image will be removed - } else { + if !(referencedBy == "" || numNames == 1) { byID := strings.HasPrefix(i.ID(), referencedBy) byDigest := strings.HasPrefix(referencedBy, "sha256:") if !options.Force { @@ -737,7 +735,7 @@ func (i *Image) RepoDigests() ([]string, error) { // Mount the image with the specified mount options and label, both of which // are directly passed down to the containers storage. Returns the fully // evaluated path to the mount point. -func (i *Image) Mount(ctx context.Context, mountOptions []string, mountLabel string) (string, error) { +func (i *Image) Mount(_ context.Context, mountOptions []string, mountLabel string) (string, error) { if i.runtime.eventChannel != nil { defer i.runtime.writeEvent(&Event{ID: i.ID(), Name: "", Time: time.Now(), Type: EventTypeImageMount}) } diff --git a/vendor/github.com/containers/common/libimage/import.go b/vendor/github.com/containers/common/libimage/import.go index f557db626..6e739f93f 100644 --- a/vendor/github.com/containers/common/libimage/import.go +++ b/vendor/github.com/containers/common/libimage/import.go @@ -54,11 +54,13 @@ func (r *Runtime) Import(ctx context.Context, path string, options *ImportOption } config := v1.Image{ - Config: ic, - History: history, - OS: options.OS, - Architecture: options.Arch, - Variant: options.Variant, + Config: ic, + History: history, + Platform: v1.Platform{ + OS: options.OS, + Architecture: options.Arch, + Variant: options.Variant, + }, } u, err := url.ParseRequestURI(path) diff --git a/vendor/github.com/containers/common/libimage/layer_tree.go b/vendor/github.com/containers/common/libimage/layer_tree.go index a7d2f8c58..e6b012f90 100644 --- a/vendor/github.com/containers/common/libimage/layer_tree.go +++ b/vendor/github.com/containers/common/libimage/layer_tree.go @@ -2,8 +2,10 @@ package libimage import ( "context" + "errors" "github.com/containers/storage" + storageTypes "github.com/containers/storage/types" ociv1 "github.com/opencontainers/image-spec/specs-go/v1" "github.com/sirupsen/logrus" ) @@ -30,7 +32,19 @@ func (t *layerTree) node(layerID string) *layerNode { return node } +// ErrorIsImageUnknown returns true if the specified error indicates that an +// image is unknown or has been partially removed (e.g., a missing layer). +func ErrorIsImageUnknown(err error) bool { + return errors.Is(err, storage.ErrImageUnknown) || + errors.Is(err, storageTypes.ErrLayerUnknown) || + errors.Is(err, storageTypes.ErrSizeUnknown) || + errors.Is(err, storage.ErrNotAnImage) +} + // toOCI returns an OCI image for the specified image. +// +// WARNING: callers are responsible for handling cases where the target image +// has been (partially) removed and can use `ErrorIsImageUnknown` to detect it. func (t *layerTree) toOCI(ctx context.Context, i *Image) (*ociv1.Image, error) { var err error oci, exists := t.ociCache[i.ID()] @@ -155,6 +169,9 @@ func (t *layerTree) children(ctx context.Context, parent *Image, all bool) ([]*I parentID := parent.ID() parentOCI, err := t.toOCI(ctx, parent) if err != nil { + if ErrorIsImageUnknown(err) { + return nil, nil + } return nil, err } @@ -165,6 +182,9 @@ func (t *layerTree) children(ctx context.Context, parent *Image, all bool) ([]*I } childOCI, err := t.toOCI(ctx, child) if err != nil { + if ErrorIsImageUnknown(err) { + return false, nil + } return false, err } // History check. 
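The Import change above nests OS, Architecture, and Variant under an embedded v1.Platform, matching the current image-spec struct layout; the JSON wire format is unchanged. A small sketch against the same image-spec package (field values are illustrative):

package main

import (
	"encoding/json"
	"fmt"

	v1 "github.com/opencontainers/image-spec/specs-go/v1"
)

func main() {
	// Recent image-spec releases moved OS/Architecture/Variant onto an
	// embedded v1.Platform, which is why the Import change above nests them.
	img := v1.Image{
		Platform: v1.Platform{
			OS:           "linux",
			Architecture: "arm64",
			Variant:      "v8",
		},
	}
	b, err := json.Marshal(img)
	if err != nil {
		panic(err)
	}
	// The wire format is unchanged: architecture/os/variant stay top-level keys.
	fmt.Println(string(b))
}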
@@ -255,6 +275,9 @@ func (t *layerTree) parent(ctx context.Context, child *Image) (*Image, error) { childID := child.ID() childOCI, err := t.toOCI(ctx, child) if err != nil { + if ErrorIsImageUnknown(err) { + return nil, nil + } return nil, err } @@ -268,6 +291,9 @@ func (t *layerTree) parent(ctx context.Context, child *Image) (*Image, error) { } emptyOCI, err := t.toOCI(ctx, empty) if err != nil { + if ErrorIsImageUnknown(err) { + return nil, nil + } return nil, err } // History check. @@ -300,6 +326,9 @@ func (t *layerTree) parent(ctx context.Context, child *Image) (*Image, error) { } parentOCI, err := t.toOCI(ctx, parent) if err != nil { + if ErrorIsImageUnknown(err) { + return nil, nil + } return nil, err } // History check. diff --git a/vendor/github.com/containers/common/libimage/manifest_list.go b/vendor/github.com/containers/common/libimage/manifest_list.go index bf1738a33..3a75709e0 100644 --- a/vendor/github.com/containers/common/libimage/manifest_list.go +++ b/vendor/github.com/containers/common/libimage/manifest_list.go @@ -388,10 +388,7 @@ func (m *ManifestList) AnnotateInstance(d digest.Digest, options *ManifestListAn } // Write the changes to disk. - if err := m.saveAndReload(); err != nil { - return err - } - return nil + return m.saveAndReload() } // RemoveInstance removes the instance specified by `d` from the manifest list. @@ -402,10 +399,7 @@ func (m *ManifestList) RemoveInstance(d digest.Digest) error { } // Write the changes to disk. - if err := m.saveAndReload(); err != nil { - return err - } - return nil + return m.saveAndReload() } // ManifestListPushOptions allow for customizing pushing a manifest list. diff --git a/vendor/github.com/containers/common/libimage/manifests/manifests.go b/vendor/github.com/containers/common/libimage/manifests/manifests.go index 0f3c1d711..7a51b8423 100644 --- a/vendor/github.com/containers/common/libimage/manifests/manifests.go +++ b/vendor/github.com/containers/common/libimage/manifests/manifests.go @@ -423,7 +423,7 @@ func (l *list) Remove(instanceDigest digest.Digest) error { // then use that list's SaveToImage() method to save a modified version of the // list to that image record use this lock to avoid accidentally wiping out // changes that another process is also attempting to make. -func LockerForImage(store storage.Store, image string) (lockfile.Locker, error) { +func LockerForImage(store storage.Store, image string) (lockfile.Locker, error) { // nolint:staticcheck img, err := store.Image(image) if err != nil { return nil, fmt.Errorf("locating image %q for locating lock: %w", image, err) diff --git a/vendor/github.com/containers/common/libimage/pull.go b/vendor/github.com/containers/common/libimage/pull.go index 4a0f5970d..188ecb5ef 100644 --- a/vendor/github.com/containers/common/libimage/pull.go +++ b/vendor/github.com/containers/common/libimage/pull.go @@ -442,8 +442,17 @@ func (r *Runtime) imagesIDsForManifest(manifestBytes []byte, sys *types.SystemCo if err != nil { return nil, fmt.Errorf("listing images by manifest digest: %w", err) } - results := make([]string, 0, len(images)) + + // If you have additionStores defined and the same image stored in + // both storage and additional store, it can be output twice. 
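ErrorIsImageUnknown lets the layer-tree walkers treat an image that was removed mid-operation as absent instead of failing the whole listing. A self-contained sketch of that skip-on-unknown pattern (the sentinel errors and loader below are stand-ins for the containers/storage ones):

package main

import (
	"errors"
	"fmt"
)

// Stand-ins for the storage sentinel errors checked by ErrorIsImageUnknown.
var (
	errImageUnknown = errors.New("image not known")
	errLayerUnknown = errors.New("layer not known")
)

func errorIsImageUnknown(err error) bool {
	return errors.Is(err, errImageUnknown) || errors.Is(err, errLayerUnknown)
}

// listChildren sketches the call sites above: a concurrently-removed image
// is treated as "no result" rather than an error for the whole tree walk.
func listChildren(load func() error) ([]string, error) {
	if err := load(); err != nil {
		if errorIsImageUnknown(err) {
			return nil, nil
		}
		return nil, err
	}
	return []string{"child"}, nil
}

func main() {
	res, err := listChildren(func() error {
		return fmt.Errorf("reading config: %w", errLayerUnknown)
	})
	fmt.Println(res, err) // [] <nil>, the vanished image is simply skipped
}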
+ // Fixes github.com/containers/podman/issues/18647 + results := []string{} + imageMap := map[string]bool{} for _, image := range images { + if imageMap[image.ID] { + continue + } + imageMap[image.ID] = true results = append(results, image.ID) } if len(results) == 0 { diff --git a/vendor/github.com/containers/common/libnetwork/types/const.go b/vendor/github.com/containers/common/libnetwork/types/const.go index e367f9ad3..83103ef6e 100644 --- a/vendor/github.com/containers/common/libnetwork/types/const.go +++ b/vendor/github.com/containers/common/libnetwork/types/const.go @@ -36,11 +36,13 @@ const ( IPVLANModeL3s = "l3s" // valid network options - VLANOption = "vlan" - MTUOption = "mtu" - ModeOption = "mode" - IsolateOption = "isolate" - MetricOption = "metric" + VLANOption = "vlan" + MTUOption = "mtu" + ModeOption = "mode" + IsolateOption = "isolate" + MetricOption = "metric" + NoDefaultRoute = "no_default_route" + BclimOption = "bclim" ) type NetworkBackend string diff --git a/vendor/github.com/containers/common/libnetwork/types/define.go b/vendor/github.com/containers/common/libnetwork/types/define.go index f84221458..6e91ccda9 100644 --- a/vendor/github.com/containers/common/libnetwork/types/define.go +++ b/vendor/github.com/containers/common/libnetwork/types/define.go @@ -3,7 +3,8 @@ package types import ( "errors" "fmt" - "regexp" + + "github.com/containers/storage/pkg/regexp" ) var ( @@ -19,7 +20,11 @@ var ( // NameRegex is a regular expression to validate names. // This must NOT be changed. - NameRegex = regexp.MustCompile("^[a-zA-Z0-9][a-zA-Z0-9_.-]*$") + NameRegex = regexp.Delayed("^[a-zA-Z0-9][a-zA-Z0-9_.-]*$") // RegexError is thrown in presence of an invalid name. RegexError = fmt.Errorf("names must match [a-zA-Z0-9][a-zA-Z0-9_.-]*: %w", ErrInvalidArg) // nolint:revive // This lint is new and we do not want to break the API. + + // NotHexRegex is a regular expression to check if a string is + // a hexadecimal string. + NotHexRegex = regexp.Delayed(`[^0-9a-fA-F]`) ) diff --git a/vendor/github.com/containers/common/libnetwork/types/network.go b/vendor/github.com/containers/common/libnetwork/types/network.go index b8804bf6b..94087fd37 100644 --- a/vendor/github.com/containers/common/libnetwork/types/network.go +++ b/vendor/github.com/containers/common/libnetwork/types/network.go @@ -34,6 +34,10 @@ type ContainerNetwork interface { // DefaultNetworkName will return the default network name // for this interface. DefaultNetworkName() string + + // NetworkInfo return the network information about backend type, + // binary path, package version and so on. + NetworkInfo() NetworkInfo } // Network describes the Network attributes. @@ -50,6 +54,8 @@ type Network struct { Created time.Time `json:"created,omitempty"` // Subnets to use for this network. Subnets []Subnet `json:"subnets,omitempty"` + // Routes to use for this network. + Routes []Route `json:"routes,omitempty"` // IPv6Enabled if set to true an ipv6 subnet should be created for this net. IPv6Enabled bool `json:"ipv6_enabled"` // Internal is whether the Network should not have external routes @@ -80,6 +86,22 @@ type NetworkUpdateOptions struct { RemoveDNSServers []string `json:"remove_dns_servers,omitempty"` } +// NetworkInfo contains the network information. 
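imagesIDsForManifest now de-duplicates IDs with a seen-map, since an image present in both the main store and an additional store would otherwise be reported twice. The pattern in isolation (sample IDs are illustrative):

package main

import "fmt"

func main() {
	ids := []string{"a1", "b2", "a1", "c3"}
	// De-duplicate while preserving first-seen order, the same seen-map
	// pattern used in imagesIDsForManifest above.
	seen := map[string]bool{}
	results := []string{}
	for _, id := range ids {
		if seen[id] {
			continue
		}
		seen[id] = true
		results = append(results, id)
	}
	fmt.Println(results) // [a1 b2 c3]
}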
+type NetworkInfo struct {
+	Backend NetworkBackend `json:"backend"`
+	Version string         `json:"version,omitempty"`
+	Package string         `json:"package,omitempty"`
+	Path    string         `json:"path,omitempty"`
+	DNS     DNSNetworkInfo `json:"dns,omitempty"`
+}
+
+// DNSNetworkInfo contains the DNS information.
+type DNSNetworkInfo struct {
+	Version string `json:"version,omitempty"`
+	Package string `json:"package,omitempty"`
+	Path    string `json:"path,omitempty"`
+}
+
 // IPNet is used as custom net.IPNet type to add Marshal/Unmarshal methods.
 type IPNet struct {
 	net.IPNet
@@ -169,6 +191,17 @@ type Subnet struct {
 	LeaseRange *LeaseRange `json:"lease_range,omitempty"`
 }
 
+type Route struct {
+	// Destination for this route in CIDR form.
+	// swagger:strfmt string
+	Destination IPNet `json:"destination"`
+	// Gateway IP for this route.
+	// swagger:strfmt string
+	Gateway net.IP `json:"gateway"`
+	// Metric for this route. Optional.
+	Metric *uint32 `json:"metric,omitempty"`
+}
+
 // LeaseRange contains the range where IP are leased.
 type LeaseRange struct {
 	// StartIP first IP in the subnet which should be used to assign ips.
diff --git a/vendor/github.com/containers/common/libnetwork/util/filters.go b/vendor/github.com/containers/common/libnetwork/util/filters.go
index 2f1e4a21f..782c5d2b9 100644
--- a/vendor/github.com/containers/common/libnetwork/util/filters.go
+++ b/vendor/github.com/containers/common/libnetwork/util/filters.go
@@ -38,7 +38,7 @@ func createFilterFuncs(key string, filterValues []string) (types.FilterFunc, err
 	case "id":
 		// matches part of one id
 		return func(net types.Network) bool {
-			return util.StringMatchRegexSlice(net.ID, filterValues)
+			return util.FilterID(net.ID, filterValues)
 		}, nil
 
 	// TODO: add dns enabled, internal filter
diff --git a/vendor/github.com/containers/common/libnetwork/util/ip.go b/vendor/github.com/containers/common/libnetwork/util/ip.go
index 7c315e312..1e426926e 100644
--- a/vendor/github.com/containers/common/libnetwork/util/ip.go
+++ b/vendor/github.com/containers/common/libnetwork/util/ip.go
@@ -54,3 +54,25 @@ func NormalizeIP(ip *net.IP) {
 		*ip = ipv4
 	}
 }
+
+// GetLocalIP returns the first non-loopback local IPv4 of the host.
+// If no ipv4 address is found, it may return an ipv6 address.
+// When no ip is found an empty string is returned.
+func GetLocalIP() string {
+	addrs, err := net.InterfaceAddrs()
+	if err != nil {
+		return ""
+	}
+	ip := ""
+	for _, address := range addrs {
+		// check the address type and, if it is not a loopback, use it
+		if ipnet, ok := address.(*net.IPNet); ok && ipnet.IP.IsGlobalUnicast() {
+			if IsIPv4(ipnet.IP) {
+				return ipnet.IP.String()
+			}
+			// if ipv6 we keep looking for an ipv4 address
+			ip = ipnet.IP.String()
+		}
+	}
+	return ip
+}
diff --git a/vendor/github.com/containers/common/pkg/cgroups/cpuset_linux.go b/vendor/github.com/containers/common/pkg/cgroups/cpuset_linux.go
index a4cc2acaf..c55c76864 100644
--- a/vendor/github.com/containers/common/pkg/cgroups/cpuset_linux.go
+++ b/vendor/github.com/containers/common/pkg/cgroups/cpuset_linux.go
@@ -52,6 +52,6 @@ func (c *linuxCpusetHandler) Destroy(ctr *CgroupControl) error {
 }
 
 // Stat fills a metrics structure with usage stats for the controller
-func (c *linuxCpusetHandler) Stat(ctr *CgroupControl, m *cgroups.Stats) error {
+func (c *linuxCpusetHandler) Stat(_ *CgroupControl, _ *cgroups.Stats) error {
 	return nil
 }
diff --git a/vendor/github.com/containers/common/pkg/config/config.go b/vendor/github.com/containers/common/pkg/config/config.go
index 140f10651..a408b4fd4 100644
--- a/vendor/github.com/containers/common/pkg/config/config.go
+++ b/vendor/github.com/containers/common/pkg/config/config.go
@@ -143,6 +143,12 @@ type ContainersConfig struct {
 	// Labeling to separate containers (SELinux)
 	EnableLabeling bool `toml:"label,omitempty"`
 
+	// EnableLabeledUsers indicates whether to enforce confined users with
+	// containers on SELinux systems. This option causes containers to
+	// maintain the current user and role field of the calling process.
+	// Otherwise containers run with user system_u, and the role system_r.
+	EnableLabeledUsers bool `toml:"label_users,omitempty"`
+
 	// Env is the environment variable list for container process.
 	Env []string `toml:"env,omitempty"`
 
@@ -504,6 +510,9 @@ type EngineConfig struct {
 
 	// CompressionFormat is the compression format used to compress image layers.
 	CompressionFormat string `toml:"compression_format,omitempty"`
+
+	// CompressionLevel is the compression level used to compress image layers.
+	CompressionLevel *int `toml:"compression_level,omitempty"`
 }
 
 // SetOptions contains a subset of options in a Config. It's used to indicate if
@@ -578,6 +587,10 @@ type NetworkConfig struct {
 	// are always assigned randomly.
 	DefaultSubnetPools []SubnetPool `toml:"default_subnet_pools,omitempty"`
 
+	// DefaultRootlessNetworkCmd is used to set the default rootless network
+	// program, either "slirp4netns" (default) or "pasta".
+	DefaultRootlessNetworkCmd string `toml:"default_rootless_network_cmd,omitempty"`
+
 	// NetworkConfigDir is where network configuration files are stored.
 	NetworkConfigDir string `toml:"network_config_dir,omitempty"`
 
@@ -585,6 +598,10 @@
 	// for netavark rootful bridges with dns enabled. This can be necessary
 	// when other dns forwarders run on the machine. 53 is used if unset.
 	DNSBindPort uint16 `toml:"dns_bind_port,omitempty,omitzero"`
+
+	// PastaOptions contains a default list of pasta(1) options that should
+	// be used when running pasta.
+ PastaOptions []string `toml:"pasta_options,omitempty"` } type SubnetPool struct { diff --git a/vendor/github.com/containers/common/pkg/config/config_darwin.go b/vendor/github.com/containers/common/pkg/config/config_darwin.go index 13bd3a376..18c466210 100644 --- a/vendor/github.com/containers/common/pkg/config/config_darwin.go +++ b/vendor/github.com/containers/common/pkg/config/config_darwin.go @@ -30,9 +30,9 @@ func ifRootlessConfigPath() (string, error) { var defaultHelperBinariesDir = []string{ // Homebrew install paths - "/usr/local/opt/podman/libexec", + "/usr/local/opt/podman/libexec/podman", + "/opt/homebrew/opt/podman/libexec/podman", "/opt/homebrew/bin", - "/opt/homebrew/opt/podman/libexec", "/usr/local/bin", // default paths "/usr/local/libexec/podman", diff --git a/vendor/github.com/containers/common/pkg/config/containers.conf b/vendor/github.com/containers/common/pkg/config/containers.conf index 5d6e2efe3..ec881f2fc 100644 --- a/vendor/github.com/containers/common/pkg/config/containers.conf +++ b/vendor/github.com/containers/common/pkg/config/containers.conf @@ -173,6 +173,12 @@ default_sysctls = [ # #label = true +# label_users indicates whether to enforce confined users in containers on +# SELinux systems. This option causes containers to maintain the current user +# and role field of the calling process. By default SELinux containers run with +# the user system_u, and the role system_r. +#label_users = false + # Logging driver for the container. Available options: k8s-file and journald. # #log_driver = "k8s-file" @@ -308,9 +314,9 @@ default_sysctls = [ # #netavark_plugin_dirs = [ # "/usr/local/libexec/netavark", -# "/usr/libexec/netavark", -# "/usr/local/lib/netavark", -# "/usr/lib/netavark", +# "/usr/libexec/netavark", +# "/usr/local/lib/netavark", +# "/usr/lib/netavark", #] # The network name of the default network to attach pods to. @@ -338,6 +344,13 @@ default_sysctls = [ # {"base" = "10.128.0.0/9", "size" = 24}, #] + + +# Configure which rootless network program to use by default. Valid options are +# `slirp4netns` (default) and `pasta`. +# +#default_rootless_network_cmd = "slirp4netns" + # Path to the directory where network configuration files are located. # For the CNI backend the default is "/etc/cni/net.d" as root # and "$HOME/.config/cni/net.d" as rootless. @@ -353,16 +366,27 @@ default_sysctls = [ # #dns_bind_port = 53 +# A list of default pasta options that should be used running pasta. +# It accepts the pasta cli options, see pasta(1) for the full list of options. +# +#pasta_options = [] + [engine] # Index to the active service # -#active_service = production +#active_service = "production" # The compression format to use when pushing an image. # Valid options are: `gzip`, `zstd` and `zstd:chunked`. # #compression_format = "gzip" +# The compression level to use when pushing an image. +# Valid options depend on the compression format used. +# For gzip, valid options are 1-9, with a default of 5. +# For zstd, valid options are 1-20, with a default of 3. +# +#compression_level = 5 # Cgroup management implementation used for the runtime. # Valid options "systemd" or "cgroupfs" @@ -401,7 +425,7 @@ default_sysctls = [ # Format is a single character [a-Z] or a comma separated sequence of # `ctrl-`, where `` is one of: # `a-z`, `@`, `^`, `[`, `\`, `]`, `^` or `_` -# +# Specifying "" disables this feature. 
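The compression_level option documented above maps to the *int EngineConfig field added earlier in this patch; the pointer is what lets the code distinguish "not set" from an explicit level. A minimal sketch (struct and field names are illustrative):

package main

import "fmt"

// CompressionLevel is declared as *int in EngineConfig so that "unset"
// (nil) can be distinguished from an explicit level such as 0 or 1.
type engineConfig struct {
	CompressionLevel *int `toml:"compression_level,omitempty"`
}

func main() {
	var c engineConfig
	if c.CompressionLevel == nil {
		fmt.Println("using the format's default level")
	}
	lvl := 3
	c.CompressionLevel = &lvl
	fmt.Println("explicit level:", *c.CompressionLevel)
}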
#detach_keys = "ctrl-p,ctrl-q" # Determines whether engine will reserve ports on the host when they are @@ -504,13 +528,13 @@ default_sysctls = [ # faster "shm" lock type. You may need to run "podman system renumber" after # you change the lock type. # -#lock_type** = "shm" +#lock_type = "shm" # MultiImageArchive - if true, the container engine allows for storing archives # (e.g., of the docker-archive transport) with multiple images. By default, # Podman creates single-image archives. # -#multi_image_archive = "false" +#multi_image_archive = false # Default engine namespace # If engine is joined to a namespace, it will see only containers and pods @@ -615,8 +639,8 @@ default_sysctls = [ # map of service destinations # -# [service_destinations] -# [service_destinations.production] +# [engine.service_destinations] +# [engine.service_destinations.production] # URI to access the Podman service # Examples: # rootless "unix://run/user/$UID/podman/podman.sock" (Default) diff --git a/vendor/github.com/containers/common/pkg/config/containers.conf-freebsd b/vendor/github.com/containers/common/pkg/config/containers.conf-freebsd index 7fe7538a1..5e187893b 100644 --- a/vendor/github.com/containers/common/pkg/config/containers.conf-freebsd +++ b/vendor/github.com/containers/common/pkg/config/containers.conf-freebsd @@ -311,6 +311,13 @@ default_sysctls = [ # #compression_format = "gzip" +# The compression level to use when pushing an image. +# Valid options depend on the compression format used. +# For gzip, valid options are 1-9, with a default of 5. +# For zstd, valid options are 1-20, with a default of 3. +# +#compression_level = 5 + # Environment variables to pass into conmon # #conmon_env_vars = [ diff --git a/vendor/github.com/containers/common/pkg/config/default.go b/vendor/github.com/containers/common/pkg/config/default.go index 28249e80e..127920997 100644 --- a/vendor/github.com/containers/common/pkg/config/default.go +++ b/vendor/github.com/containers/common/pkg/config/default.go @@ -32,8 +32,6 @@ const ( ) var ( - // DefaultInitPath is the default path to the container-init binary. - DefaultInitPath = "/usr/libexec/podman/catatonit" // DefaultInfraImage is the default image to run as infrastructure containers in pods. DefaultInfraImage = "" // DefaultRootlessSHMLockPath is the default path for rootless SHM locks. 
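The service_destinations correction above matters because TOML tables are absolute paths: a bare [service_destinations.production] defines a top-level table that the engine section never sees. A sketch of the corrected nesting, decoded with the BurntSushi/toml parser (assumed here for illustration; the Go struct and field names are stand-ins):

package main

import (
	"fmt"

	"github.com/BurntSushi/toml"
)

type dest struct {
	URI string `toml:"uri"`
}

type engine struct {
	ServiceDestinations map[string]dest `toml:"service_destinations"`
}

type config struct {
	Engine engine `toml:"engine"`
}

func main() {
	// Nesting the table under [engine ...] is what the corrected example
	// in containers.conf reflects; an un-nested table would be ignored.
	doc := `
[engine.service_destinations.production]
uri = "unix:///run/podman/podman.sock"
`
	var c config
	if _, err := toml.Decode(doc, &c); err != nil {
		panic(err)
	}
	fmt.Println(c.Engine.ServiceDestinations["production"].URI)
}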
@@ -215,12 +213,13 @@ func DefaultConfig() (*Config, error) { UserNSSize: DefaultUserNSSize, // Deprecated }, Network: NetworkConfig{ - DefaultNetwork: "podman", - DefaultSubnet: DefaultSubnet, - DefaultSubnetPools: DefaultSubnetPools, - DNSBindPort: 0, - CNIPluginDirs: DefaultCNIPluginDirs, - NetavarkPluginDirs: DefaultNetavarkPluginDirs, + DefaultNetwork: "podman", + DefaultSubnet: DefaultSubnet, + DefaultSubnetPools: DefaultSubnetPools, + DefaultRootlessNetworkCmd: "slirp4netns", + DNSBindPort: 0, + CNIPluginDirs: DefaultCNIPluginDirs, + NetavarkPluginDirs: DefaultNetavarkPluginDirs, }, Engine: *defaultEngineConfig, Secrets: defaultSecretConfig(), @@ -283,6 +282,7 @@ func defaultConfigFromMemory() (*EngineConfig, error) { c.VolumePath = filepath.Join(storeOpts.GraphRoot, "volumes") c.VolumePluginTimeout = DefaultVolumePluginTimeout + c.CompressionFormat = "gzip" c.HelperBinariesDir = defaultHelperBinariesDir if additionalHelperBinariesDir != "" { diff --git a/vendor/github.com/containers/common/pkg/config/default_common.go b/vendor/github.com/containers/common/pkg/config/default_common.go new file mode 100644 index 000000000..f65461043 --- /dev/null +++ b/vendor/github.com/containers/common/pkg/config/default_common.go @@ -0,0 +1,7 @@ +//go:build !freebsd +// +build !freebsd + +package config + +// DefaultInitPath is the default path to the container-init binary. +var DefaultInitPath = "/usr/libexec/podman/catatonit" diff --git a/vendor/github.com/containers/common/pkg/config/default_freebsd.go b/vendor/github.com/containers/common/pkg/config/default_freebsd.go index f3c999bed..637abf981 100644 --- a/vendor/github.com/containers/common/pkg/config/default_freebsd.go +++ b/vendor/github.com/containers/common/pkg/config/default_freebsd.go @@ -1,5 +1,8 @@ package config +// DefaultInitPath is the default path to the container-init binary. +var DefaultInitPath = "/usr/local/libexec/podman/catatonit" + func getDefaultCgroupsMode() string { return "enabled" } diff --git a/vendor/github.com/containers/common/pkg/manifests/manifests.go b/vendor/github.com/containers/common/pkg/manifests/manifests.go index d351bdf17..8296faa82 100644 --- a/vendor/github.com/containers/common/pkg/manifests/manifests.go +++ b/vendor/github.com/containers/common/pkg/manifests/manifests.go @@ -73,7 +73,8 @@ func Create() List { // AddInstance adds an entry for the specified manifest digest, with assorted // additional information specified in parameters, to the list or index. -func (l *list) AddInstance(manifestDigest digest.Digest, manifestSize int64, manifestType, osName, architecture, osVersion string, osFeatures []string, variant string, features, annotations []string) error { +func (l *list) AddInstance(manifestDigest digest.Digest, manifestSize int64, manifestType, osName, architecture, osVersion string, osFeatures []string, variant string, features, annotations []string) error { // nolint:revive + // FIXME: the annotations argument is currently ignored if err := l.Remove(manifestDigest); err != nil && !errors.Is(err, os.ErrNotExist) { return err } diff --git a/vendor/github.com/containers/common/pkg/retry/retry.go b/vendor/github.com/containers/common/pkg/retry/retry.go index a838c706a..5cf311b43 100644 --- a/vendor/github.com/containers/common/pkg/retry/retry.go +++ b/vendor/github.com/containers/common/pkg/retry/retry.go @@ -17,8 +17,9 @@ import ( // Options defines the option to retry. type Options struct { - MaxRetry int // The number of times to possibly retry. 
- Delay time.Duration // The delay to use between retries, if set. + MaxRetry int // The number of times to possibly retry. + Delay time.Duration // The delay to use between retries, if set. + IsErrorRetryable func(error) bool } // RetryOptions is deprecated, use Options. @@ -31,6 +32,12 @@ func RetryIfNecessary(ctx context.Context, operation func() error, options *Opti // IfNecessary retries the operation in exponential backoff with the retry Options. func IfNecessary(ctx context.Context, operation func() error, options *Options) error { + var isRetryable func(error) bool + if options.IsErrorRetryable != nil { + isRetryable = options.IsErrorRetryable + } else { + isRetryable = IsErrorRetryable + } err := operation() for attempt := 0; err != nil && isRetryable(err) && attempt < options.MaxRetry; attempt++ { delay := time.Duration(int(math.Pow(2, float64(attempt)))) * time.Second @@ -49,7 +56,11 @@ func IfNecessary(ctx context.Context, operation func() error, options *Options) return err } -func isRetryable(err error) bool { +// IsErrorRetryable makes a HEURISTIC determination whether it is worth retrying upon encountering an error. +// That heuristic is NOT STABLE and it CAN CHANGE AT ANY TIME. +// Callers that have a hard requirement for specific treatment of a class of errors should make their own check +// instead of relying on this function maintaining its past behavior. +func IsErrorRetryable(err error) bool { switch err { case nil: return false @@ -72,18 +83,18 @@ func isRetryable(err error) bool { } return true case *net.OpError: - return isRetryable(e.Err) + return IsErrorRetryable(e.Err) case *url.Error: // This includes errors returned by the net/http client. if e.Err == io.EOF { // Happens when a server accepts a HTTP connection and sends EOF return true } - return isRetryable(e.Err) + return IsErrorRetryable(e.Err) case syscall.Errno: return isErrnoRetryable(e) case errcode.Errors: // if this error is a group of errors, process them all in turn for i := range e { - if !isRetryable(e[i]) { + if !IsErrorRetryable(e[i]) { return false } } @@ -91,7 +102,7 @@ func isRetryable(err error) bool { case *multierror.Error: // if this error is a group of errors, process them all in turn for i := range e.Errors { - if !isRetryable(e.Errors[i]) { + if !IsErrorRetryable(e.Errors[i]) { return false } } @@ -102,11 +113,11 @@ func isRetryable(err error) bool { } if unwrappable, ok := e.(unwrapper); ok { err = unwrappable.Unwrap() - return isRetryable(err) + return IsErrorRetryable(err) } case unwrapper: // Test this last, because various error types might implement .Unwrap() err = e.Unwrap() - return isRetryable(err) + return IsErrorRetryable(err) } return false diff --git a/vendor/github.com/containers/common/pkg/supplemented/supplemented.go b/vendor/github.com/containers/common/pkg/supplemented/supplemented.go index 58c9af654..6ae9a4160 100644 --- a/vendor/github.com/containers/common/pkg/supplemented/supplemented.go +++ b/vendor/github.com/containers/common/pkg/supplemented/supplemented.go @@ -280,7 +280,7 @@ func (s *supplementedImageReference) NewImageSource(ctx context.Context, sys *ty return iss, nil } -func (s *supplementedImageReference) DeleteImage(ctx context.Context, sys *types.SystemContext) error { +func (s *supplementedImageReference) DeleteImage(_ context.Context, _ *types.SystemContext) error { return fmt.Errorf("deletion of images not implemented") } diff --git a/vendor/github.com/containers/common/pkg/util/util.go b/vendor/github.com/containers/common/pkg/util/util.go index 
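IfNecessary above retries with exponential backoff (2^attempt seconds) and now accepts a caller-supplied retryability predicate via Options.IsErrorRetryable. A self-contained sketch of that loop (simplified; this version also gives up when the context is cancelled):

package main

import (
	"context"
	"errors"
	"fmt"
	"math"
	"time"
)

// ifNecessary mirrors the retry loop above: run op, then retry with
// exponentially growing delays while the error is considered retryable.
func ifNecessary(ctx context.Context, op func() error, maxRetry int, retryable func(error) bool) error {
	err := op()
	for attempt := 0; err != nil && retryable(err) && attempt < maxRetry; attempt++ {
		delay := time.Duration(int(math.Pow(2, float64(attempt)))) * time.Second
		fmt.Printf("retrying in %v\n", delay)
		select {
		case <-time.After(delay):
		case <-ctx.Done():
			return err
		}
		err = op()
	}
	return err
}

func main() {
	calls := 0
	err := ifNecessary(context.Background(), func() error {
		calls++
		if calls < 3 {
			return errors.New("transient")
		}
		return nil
	}, 5, func(error) bool { return true })
	fmt.Println(err, calls) // <nil> 3
}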
98890a686..e396f0fc0 100644 --- a/vendor/github.com/containers/common/pkg/util/util.go +++ b/vendor/github.com/containers/common/pkg/util/util.go @@ -1,6 +1,101 @@ package util -import "regexp" +import ( + "bytes" + "fmt" + "os" + "os/exec" + "path/filepath" + "regexp" + "strings" + "time" + + "github.com/containers/common/libnetwork/types" + "github.com/fsnotify/fsnotify" + "github.com/sirupsen/logrus" +) + +const ( + UnknownPackage = "Unknown" +) + +// Note: This function is copied from containers/podman libpod/util.go +// Please see https://github.com/containers/common/pull/1460 +func queryPackageVersion(cmdArg ...string) string { + output := UnknownPackage + if 1 < len(cmdArg) { + cmd := exec.Command(cmdArg[0], cmdArg[1:]...) + if outp, err := cmd.Output(); err == nil { + output = string(outp) + if cmdArg[0] == "/usr/bin/dpkg" { + r := strings.Split(output, ": ") + queryFormat := `${Package}_${Version}_${Architecture}` + cmd = exec.Command("/usr/bin/dpkg-query", "-f", queryFormat, "-W", r[0]) + if outp, err := cmd.Output(); err == nil { + output = string(outp) + } + } + } + if cmdArg[0] == "/sbin/apk" { + prefix := cmdArg[len(cmdArg)-1] + " is owned by " + output = strings.Replace(output, prefix, "", 1) + } + } + return strings.Trim(output, "\n") +} + +// Note: This function is copied from containers/podman libpod/util.go +// Please see https://github.com/containers/common/pull/1460 +func PackageVersion(program string) string { // program is full path + packagers := [][]string{ + {"/usr/bin/rpm", "-q", "-f"}, + {"/usr/bin/dpkg", "-S"}, // Debian, Ubuntu + {"/usr/bin/pacman", "-Qo"}, // Arch + {"/usr/bin/qfile", "-qv"}, // Gentoo (quick) + {"/usr/bin/equery", "b"}, // Gentoo (slow) + {"/sbin/apk", "info", "-W"}, // Alpine + {"/usr/local/sbin/pkg", "which", "-q"}, // FreeBSD + } + + for _, cmd := range packagers { + cmd = append(cmd, program) + if out := queryPackageVersion(cmd...); out != UnknownPackage { + return out + } + } + return UnknownPackage +} + +// Note: This function is copied from containers/podman libpod/util.go +// Please see https://github.com/containers/common/pull/1460 +func ProgramVersion(program string) (string, error) { + return programVersion(program, false) +} + +func ProgramVersionDnsname(program string) (string, error) { + return programVersion(program, true) +} + +func programVersion(program string, dnsname bool) (string, error) { + cmd := exec.Command(program, "--version") + var stdout bytes.Buffer + var stderr bytes.Buffer + cmd.Stdout = &stdout + cmd.Stderr = &stderr + + err := cmd.Run() + if err != nil { + return "", fmt.Errorf("`%v --version` failed: %v %v (%v)", program, stderr.String(), stdout.String(), err) + } + + output := strings.TrimSuffix(stdout.String(), "\n") + // dnsname --version returns the information to stderr + if dnsname { + output = strings.TrimSuffix(stderr.String(), "\n") + } + + return output, nil +} // StringInSlice determines if a string is in a string slice, returns bool func StringInSlice(s string, sl []string) bool { @@ -22,3 +117,73 @@ func StringMatchRegexSlice(s string, re []string) bool { } return false } + +// FilterID is a function used to compare an id against a set of ids, if the +// input is hex we check if the prefix matches. Otherwise we assume it is a +// regex and try to match that. 
+// see https://github.com/containers/podman/issues/18471 for why we do this +func FilterID(id string, filters []string) bool { + for _, want := range filters { + isRegex := types.NotHexRegex.MatchString(want) + if isRegex { + match, err := regexp.MatchString(want, id) + if err == nil && match { + return true + } + } else if strings.HasPrefix(id, strings.ToLower(want)) { + return true + } + } + return false +} + +// WaitForFile waits until a file has been created or the given timeout has occurred +func WaitForFile(path string, chWait chan error, timeout time.Duration) (bool, error) { + var inotifyEvents chan fsnotify.Event + watcher, err := fsnotify.NewWatcher() + if err == nil { + if err := watcher.Add(filepath.Dir(path)); err == nil { + inotifyEvents = watcher.Events + } + defer func() { + if err := watcher.Close(); err != nil { + logrus.Errorf("Failed to close fsnotify watcher: %v", err) + } + }() + } + + var timeoutChan <-chan time.Time + + if timeout != 0 { + timeoutChan = time.After(timeout) + } + + for { + select { + case e := <-chWait: + return true, e + case <-inotifyEvents: + _, err := os.Stat(path) + if err == nil { + return false, nil + } + if !os.IsNotExist(err) { + return false, err + } + case <-time.After(25 * time.Millisecond): + // Check periodically for the file existence. It is needed + // if the inotify watcher could not have been created. It is + // also useful when using inotify as if for any reasons we missed + // a notification, we won't hang the process. + _, err := os.Stat(path) + if err == nil { + return false, nil + } + if !os.IsNotExist(err) { + return false, err + } + case <-timeoutChan: + return false, fmt.Errorf("timed out waiting for file %s", path) + } + } +} diff --git a/vendor/github.com/containers/common/version/version.go b/vendor/github.com/containers/common/version/version.go index 8bd54871c..f4b68b44b 100644 --- a/vendor/github.com/containers/common/version/version.go +++ b/vendor/github.com/containers/common/version/version.go @@ -1,4 +1,4 @@ package version // Version is the version of the build. -const Version = "0.53.0" +const Version = "0.54.0" diff --git a/vendor/github.com/containers/image/v5/copy/blob.go b/vendor/github.com/containers/image/v5/copy/blob.go index 96674ddbb..f45b97f56 100644 --- a/vendor/github.com/containers/image/v5/copy/blob.go +++ b/vendor/github.com/containers/image/v5/copy/blob.go @@ -43,7 +43,7 @@ func (ic *imageCopier) copyBlobFromStream(ctx context.Context, srcReader io.Read stream.reader = bar.ProxyReader(stream.reader) // === Decrypt the stream, if required. - decryptionStep, err := ic.c.blobPipelineDecryptionStep(&stream, srcInfo) + decryptionStep, err := ic.blobPipelineDecryptionStep(&stream, srcInfo) if err != nil { return types.BlobInfo{}, err } @@ -78,7 +78,7 @@ func (ic *imageCopier) copyBlobFromStream(ctx context.Context, srcReader io.Read // Before relaxing this, see the original pull request’s review if there are other reasons to reject this. 
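FilterID above treats all-hex input as an ID prefix and anything else as a regular expression, which is why both a prefix like "D0E1" and a pattern like "^d0.*3$" match. A runnable sketch of the same dispatch (the sample ID is illustrative):

package main

import (
	"fmt"
	"regexp"
	"strings"
)

var notHex = regexp.MustCompile(`[^0-9a-fA-F]`)

// filterID mirrors the semantics above: hex input is treated as an ID
// prefix, anything else as a regular expression.
func filterID(id string, filters []string) bool {
	for _, want := range filters {
		if notHex.MatchString(want) {
			if m, err := regexp.MatchString(want, id); err == nil && m {
				return true
			}
		} else if strings.HasPrefix(id, strings.ToLower(want)) {
			return true
		}
	}
	return false
}

func main() {
	id := "d0e1f2a3"
	fmt.Println(filterID(id, []string{"D0E1"}))    // true: hex prefix match
	fmt.Println(filterID(id, []string{"^d0.*3$"})) // true: regex match
}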
return types.BlobInfo{}, errors.New("Unable to support both decryption and encryption in the same copy") } - encryptionStep, err := ic.c.blobPipelineEncryptionStep(&stream, toEncrypt, srcInfo, decryptionStep) + encryptionStep, err := ic.blobPipelineEncryptionStep(&stream, toEncrypt, srcInfo, decryptionStep) if err != nil { return types.BlobInfo{}, err } diff --git a/vendor/github.com/containers/image/v5/copy/encryption.go b/vendor/github.com/containers/image/v5/copy/encryption.go index 54aca9e57..86fadff66 100644 --- a/vendor/github.com/containers/image/v5/copy/encryption.go +++ b/vendor/github.com/containers/image/v5/copy/encryption.go @@ -33,28 +33,33 @@ type bpDecryptionStepData struct { // blobPipelineDecryptionStep updates *stream to decrypt if, it necessary. // srcInfo is only used for error messages. // Returns data for other steps; the caller should eventually use updateCryptoOperation. -func (c *copier) blobPipelineDecryptionStep(stream *sourceStream, srcInfo types.BlobInfo) (*bpDecryptionStepData, error) { - if isOciEncrypted(stream.info.MediaType) && c.ociDecryptConfig != nil { - desc := imgspecv1.Descriptor{ - Annotations: stream.info.Annotations, - } - reader, decryptedDigest, err := ocicrypt.DecryptLayer(c.ociDecryptConfig, stream.reader, desc, false) - if err != nil { - return nil, fmt.Errorf("decrypting layer %s: %w", srcInfo.Digest, err) - } - - stream.reader = reader - stream.info.Digest = decryptedDigest - stream.info.Size = -1 - maps.DeleteFunc(stream.info.Annotations, func(k string, _ string) bool { - return strings.HasPrefix(k, "org.opencontainers.image.enc") - }) +func (ic *imageCopier) blobPipelineDecryptionStep(stream *sourceStream, srcInfo types.BlobInfo) (*bpDecryptionStepData, error) { + if !isOciEncrypted(stream.info.MediaType) || ic.c.ociDecryptConfig == nil { return &bpDecryptionStepData{ - decrypting: true, + decrypting: false, }, nil } + + if ic.cannotModifyManifestReason != "" { + return nil, fmt.Errorf("layer %s should be decrypted, but we can’t modify the manifest: %s", srcInfo.Digest, ic.cannotModifyManifestReason) + } + + desc := imgspecv1.Descriptor{ + Annotations: stream.info.Annotations, + } + reader, decryptedDigest, err := ocicrypt.DecryptLayer(ic.c.ociDecryptConfig, stream.reader, desc, false) + if err != nil { + return nil, fmt.Errorf("decrypting layer %s: %w", srcInfo.Digest, err) + } + + stream.reader = reader + stream.info.Digest = decryptedDigest + stream.info.Size = -1 + maps.DeleteFunc(stream.info.Annotations, func(k string, _ string) bool { + return strings.HasPrefix(k, "org.opencontainers.image.enc") + }) return &bpDecryptionStepData{ - decrypting: false, + decrypting: true, }, nil } @@ -74,34 +79,39 @@ type bpEncryptionStepData struct { // blobPipelineEncryptionStep updates *stream to encrypt if, it required by toEncrypt. // srcInfo is primarily used for error messages. // Returns data for other steps; the caller should eventually call updateCryptoOperationAndAnnotations. 
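After a successful decryption, blobPipelineDecryptionStep strips the org.opencontainers.image.enc.* bookkeeping annotations with maps.DeleteFunc from golang.org/x/exp/maps, the same package the hunk imports. The same call in isolation (annotation values are illustrative):

package main

import (
	"fmt"
	"strings"

	"golang.org/x/exp/maps"
)

func main() {
	annotations := map[string]string{
		"org.opencontainers.image.enc.keys.jwe": "jwe-key-data",
		"org.opencontainers.image.title":        "demo",
	}
	// After decryption the encryption bookkeeping annotations are dropped,
	// as in blobPipelineDecryptionStep above.
	maps.DeleteFunc(annotations, func(k, _ string) bool {
		return strings.HasPrefix(k, "org.opencontainers.image.enc")
	})
	fmt.Println(annotations) // map[org.opencontainers.image.title:demo]
}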
-func (c *copier) blobPipelineEncryptionStep(stream *sourceStream, toEncrypt bool, srcInfo types.BlobInfo, +func (ic *imageCopier) blobPipelineEncryptionStep(stream *sourceStream, toEncrypt bool, srcInfo types.BlobInfo, decryptionStep *bpDecryptionStepData) (*bpEncryptionStepData, error) { - if toEncrypt && !isOciEncrypted(srcInfo.MediaType) && c.ociEncryptConfig != nil { - var annotations map[string]string - if !decryptionStep.decrypting { - annotations = srcInfo.Annotations - } - desc := imgspecv1.Descriptor{ - MediaType: srcInfo.MediaType, - Digest: srcInfo.Digest, - Size: srcInfo.Size, - Annotations: annotations, - } - reader, finalizer, err := ocicrypt.EncryptLayer(c.ociEncryptConfig, stream.reader, desc) - if err != nil { - return nil, fmt.Errorf("encrypting blob %s: %w", srcInfo.Digest, err) - } - - stream.reader = reader - stream.info.Digest = "" - stream.info.Size = -1 + if !toEncrypt || isOciEncrypted(srcInfo.MediaType) || ic.c.ociEncryptConfig == nil { return &bpEncryptionStepData{ - encrypting: true, - finalizer: finalizer, + encrypting: false, }, nil } + + if ic.cannotModifyManifestReason != "" { + return nil, fmt.Errorf("layer %s should be encrypted, but we can’t modify the manifest: %s", srcInfo.Digest, ic.cannotModifyManifestReason) + } + + var annotations map[string]string + if !decryptionStep.decrypting { + annotations = srcInfo.Annotations + } + desc := imgspecv1.Descriptor{ + MediaType: srcInfo.MediaType, + Digest: srcInfo.Digest, + Size: srcInfo.Size, + Annotations: annotations, + } + reader, finalizer, err := ocicrypt.EncryptLayer(ic.c.ociEncryptConfig, stream.reader, desc) + if err != nil { + return nil, fmt.Errorf("encrypting blob %s: %w", srcInfo.Digest, err) + } + + stream.reader = reader + stream.info.Digest = "" + stream.info.Size = -1 return &bpEncryptionStepData{ - encrypting: false, + encrypting: true, + finalizer: finalizer, }, nil } diff --git a/vendor/github.com/containers/image/v5/copy/manifest.go b/vendor/github.com/containers/image/v5/copy/manifest.go index a35ea4220..6f01cf5cc 100644 --- a/vendor/github.com/containers/image/v5/copy/manifest.go +++ b/vendor/github.com/containers/image/v5/copy/manifest.go @@ -9,6 +9,7 @@ import ( "github.com/containers/image/v5/internal/set" "github.com/containers/image/v5/manifest" "github.com/containers/image/v5/types" + v1 "github.com/opencontainers/image-spec/specs-go/v1" "github.com/sirupsen/logrus" "golang.org/x/exp/slices" ) @@ -18,6 +19,9 @@ import ( // Include v2s1 signed but not v2s1 unsigned, because docker/distribution requires a signature even if the unsigned MIME type is used. var preferredManifestMIMETypes = []string{manifest.DockerV2Schema2MediaType, manifest.DockerV2Schema1SignedMediaType} +// ociEncryptionMIMETypes lists manifest MIME types that are known to support OCI encryption. +var ociEncryptionMIMETypes = []string{v1.MediaTypeImageManifest} + // orderedSet is a list of strings (MIME types or platform descriptors in our case), with each string appearing at most once. type orderedSet struct { list []string @@ -76,11 +80,14 @@ func determineManifestConversion(in determineManifestConversionInputs) (manifest destSupportedManifestMIMETypes = []string{in.forceManifestMIMEType} } - if len(destSupportedManifestMIMETypes) == 0 && (!in.requiresOCIEncryption || manifest.MIMETypeSupportsEncryption(srcType)) { - return manifestConversionPlan{ // Anything goes; just use the original as is, do not try any conversions. 
- preferredMIMEType: srcType, - otherMIMETypeCandidates: []string{}, - }, nil + if len(destSupportedManifestMIMETypes) == 0 { + if !in.requiresOCIEncryption || manifest.MIMETypeSupportsEncryption(srcType) { + return manifestConversionPlan{ // Anything goes; just use the original as is, do not try any conversions. + preferredMIMEType: srcType, + otherMIMETypeCandidates: []string{}, + }, nil + } + destSupportedManifestMIMETypes = ociEncryptionMIMETypes } supportedByDest := set.New[string]() for _, t := range destSupportedManifestMIMETypes { @@ -88,6 +95,27 @@ func determineManifestConversion(in determineManifestConversionInputs) (manifest supportedByDest.Add(t) } } + if supportedByDest.Empty() { + if len(destSupportedManifestMIMETypes) == 0 { // Coverage: This should never happen, empty values were replaced by ociEncryptionMIMETypes + return manifestConversionPlan{}, errors.New("internal error: destSupportedManifestMIMETypes is empty") + } + // We know, and have verified, that destSupportedManifestMIMETypes is not empty, so encryption must have been involved. + if !in.requiresOCIEncryption { // Coverage: This should never happen, destSupportedManifestMIMETypes was not empty, so we should have filtered for encryption. + return manifestConversionPlan{}, errors.New("internal error: supportedByDest is empty but destSupportedManifestMIMETypes is not, and not encrypting") + } + // destSupportedManifestMIMETypes has three possible origins: + if in.forceManifestMIMEType != "" { // 1. forceManifestType specified + return manifestConversionPlan{}, fmt.Errorf("encryption required together with format %s, which does not support encryption", + in.forceManifestMIMEType) + } + if len(in.destSupportedManifestMIMETypes) == 0 { // 2. destination accepts anything and we have chosen ociEncryptionMIMETypes + // Coverage: This should never happen, ociEncryptionMIMETypes all support encryption + return manifestConversionPlan{}, errors.New("internal error: in.destSupportedManifestMIMETypes is empty but supportedByDest is empty as well") + } + // 3. destination does not support encryption. + return manifestConversionPlan{}, fmt.Errorf("encryption required but the destination only supports MIME types [%s], none of which support encryption", + strings.Join(destSupportedManifestMIMETypes, ", ")) + } // destSupportedManifestMIMETypes is a static guess; a particular registry may still only support a subset of the types. // So, build a list of types to try in order of decreasing preference. @@ -122,11 +150,13 @@ func determineManifestConversion(in determineManifestConversionInputs) (manifest // Finally, try anything else the destination supports. for _, t := range destSupportedManifestMIMETypes { - prioritizedTypes.append(t) + if supportedByDest.Contains(t) { + prioritizedTypes.append(t) + } } logrus.Debugf("Manifest has MIME type %s, ordered candidate list [%s]", srcType, strings.Join(prioritizedTypes.list, ", ")) - if len(prioritizedTypes.list) == 0 { // Coverage: destSupportedManifestMIMETypes is not empty (or we would have exited in the “Anything goes” case above), so this should never happen. + if len(prioritizedTypes.list) == 0 { // Coverage: destSupportedManifestMIMETypes and supportedByDest, which is a subset, is not empty (or we would have exited above), so this should never happen. 
return manifestConversionPlan{}, errors.New("Internal error: no candidate MIME types") } res := manifestConversionPlan{ diff --git a/vendor/github.com/containers/image/v5/copy/multiple.go b/vendor/github.com/containers/image/v5/copy/multiple.go index 097a18855..41ea1b11b 100644 --- a/vendor/github.com/containers/image/v5/copy/multiple.go +++ b/vendor/github.com/containers/image/v5/copy/multiple.go @@ -12,11 +12,41 @@ import ( internalManifest "github.com/containers/image/v5/internal/manifest" "github.com/containers/image/v5/manifest" "github.com/containers/image/v5/signature" + digest "github.com/opencontainers/go-digest" imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1" "github.com/sirupsen/logrus" "golang.org/x/exp/slices" ) +type instanceCopyKind int + +const ( + instanceCopyCopy instanceCopyKind = iota + instanceCopyClone +) + +type instanceCopy struct { + op instanceCopyKind + sourceDigest digest.Digest +} + +// prepareInstanceCopies prepares a list of instances which needs to copied to the manifest list. +func prepareInstanceCopies(instanceDigests []digest.Digest, options *Options) []instanceCopy { + res := []instanceCopy{} + for i, instanceDigest := range instanceDigests { + if options.ImageListSelection == CopySpecificImages && + !slices.Contains(options.Instances, instanceDigest) { + logrus.Debugf("Skipping instance %s (%d/%d)", instanceDigest, i+1, len(instanceDigests)) + continue + } + res = append(res, instanceCopy{ + op: instanceCopyCopy, + sourceDigest: instanceDigest, + }) + } + return res +} + // copyMultipleImages copies some or all of an image list's instances, using // policyContext to validate source image admissibility. func (c *copier) copyMultipleImages(ctx context.Context, policyContext *signature.PolicyContext, options *Options, unparsedToplevel *image.UnparsedImage) (copiedManifest []byte, retErr error) { @@ -88,44 +118,35 @@ func (c *copier) copyMultipleImages(ctx context.Context, policyContext *signatur // Copy each image, or just the ones we want to copy, in turn. instanceDigests := updatedList.Instances() - imagesToCopy := len(instanceDigests) - if options.ImageListSelection == CopySpecificImages { - imagesToCopy = len(options.Instances) - } - c.Printf("Copying %d of %d images in list\n", imagesToCopy, len(instanceDigests)) - updates := make([]manifest.ListUpdate, len(instanceDigests)) - instancesCopied := 0 - for i, instanceDigest := range instanceDigests { - if options.ImageListSelection == CopySpecificImages && - !slices.Contains(options.Instances, instanceDigest) { - update, err := updatedList.Instance(instanceDigest) + instanceEdits := []internalManifest.ListEdit{} + instanceCopyList := prepareInstanceCopies(instanceDigests, options) + c.Printf("Copying %d of %d images in list\n", len(instanceCopyList), len(instanceDigests)) + for i, instance := range instanceCopyList { + // Update instances to be edited by their `ListOperation` and + // populate necessary fields. 
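prepareInstanceCopies above builds the work list up front: with CopySpecificImages, instances outside options.Instances are skipped before any copying starts. A trimmed sketch of that selection step (digest strings stand in for digest.Digest; slices is the golang.org/x/exp package the diff already imports):

package main

import (
	"fmt"

	"golang.org/x/exp/slices"
)

type instanceCopy struct{ sourceDigest string }

// prepareCopies mirrors prepareInstanceCopies above: when a specific
// subset is requested, instances outside it are skipped up front.
func prepareCopies(all, wanted []string, specificOnly bool) []instanceCopy {
	res := []instanceCopy{}
	for _, d := range all {
		if specificOnly && !slices.Contains(wanted, d) {
			continue
		}
		res = append(res, instanceCopy{sourceDigest: d})
	}
	return res
}

func main() {
	all := []string{"sha256:aa", "sha256:bb", "sha256:cc"}
	fmt.Println(prepareCopies(all, []string{"sha256:bb"}, true)) // [{sha256:bb}]
}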
+ switch instance.op { + case instanceCopyCopy: + logrus.Debugf("Copying instance %s (%d/%d)", instance.sourceDigest, i+1, len(instanceCopyList)) + c.Printf("Copying image %s (%d/%d)\n", instance.sourceDigest, i+1, len(instanceCopyList)) + unparsedInstance := image.UnparsedInstance(c.rawSource, &instanceCopyList[i].sourceDigest) + updatedManifest, updatedManifestType, updatedManifestDigest, err := c.copySingleImage(ctx, policyContext, options, unparsedToplevel, unparsedInstance, &instanceCopyList[i].sourceDigest) if err != nil { - return nil, err + return nil, fmt.Errorf("copying image %d/%d from manifest list: %w", i+1, len(instanceCopyList), err) } - logrus.Debugf("Skipping instance %s (%d/%d)", instanceDigest, i+1, len(instanceDigests)) - // Record the digest/size/type of the manifest that we didn't copy. - updates[i] = update - continue - } - logrus.Debugf("Copying instance %s (%d/%d)", instanceDigest, i+1, len(instanceDigests)) - c.Printf("Copying image %s (%d/%d)\n", instanceDigest, instancesCopied+1, imagesToCopy) - unparsedInstance := image.UnparsedInstance(c.rawSource, &instanceDigest) - updatedManifest, updatedManifestType, updatedManifestDigest, err := c.copySingleImage(ctx, policyContext, options, unparsedToplevel, unparsedInstance, &instanceDigest) - if err != nil { - return nil, fmt.Errorf("copying image %d/%d from manifest list: %w", instancesCopied+1, imagesToCopy, err) - } - instancesCopied++ - // Record the result of a possible conversion here. - update := manifest.ListUpdate{ - Digest: updatedManifestDigest, - Size: int64(len(updatedManifest)), - MediaType: updatedManifestType, + // Record the result of a possible conversion here. + instanceEdits = append(instanceEdits, internalManifest.ListEdit{ + ListOperation: internalManifest.ListOpUpdate, + UpdateOldDigest: instance.sourceDigest, + UpdateDigest: updatedManifestDigest, + UpdateSize: int64(len(updatedManifest)), + UpdateMediaType: updatedManifestType}) + default: + return nil, fmt.Errorf("copying image: invalid copy operation %d", instance.op) } - updates[i] = update } // Now reset the digest/size/types of the manifests in the list to account for any conversions that we made. - if err = updatedList.UpdateInstances(updates); err != nil { + if err = updatedList.EditInstances(instanceEdits); err != nil { return nil, fmt.Errorf("updating manifest list: %w", err) } diff --git a/vendor/github.com/containers/image/v5/copy/single.go b/vendor/github.com/containers/image/v5/copy/single.go index 9afdea73d..b8569a70c 100644 --- a/vendor/github.com/containers/image/v5/copy/single.go +++ b/vendor/github.com/containers/image/v5/copy/single.go @@ -256,9 +256,11 @@ func (c *copier) copySingleImage(ctx context.Context, policyContext *signature.P } sigs = append(sigs, newSigs...) 
- c.Printf("Storing signatures\n") - if err := c.dest.PutSignaturesWithFormat(ctx, sigs, targetInstance); err != nil { - return nil, "", "", fmt.Errorf("writing signatures: %w", err) + if len(sigs) > 0 { + c.Printf("Storing signatures\n") + if err := c.dest.PutSignaturesWithFormat(ctx, sigs, targetInstance); err != nil { + return nil, "", "", fmt.Errorf("writing signatures: %w", err) + } } return manifestBytes, retManifestType, retManifestDigest, nil diff --git a/vendor/github.com/containers/image/v5/docker/daemon/client.go b/vendor/github.com/containers/image/v5/docker/daemon/client.go index 7d2a98d68..2c245f54f 100644 --- a/vendor/github.com/containers/image/v5/docker/daemon/client.go +++ b/vendor/github.com/containers/image/v5/docker/daemon/client.go @@ -21,33 +21,49 @@ func newDockerClient(sys *types.SystemContext) (*dockerclient.Client, error) { host = sys.DockerDaemonHost } - // Sadly, unix:// sockets don't work transparently with dockerclient.NewClient. - // They work fine with a nil httpClient; with a non-nil httpClient, the transport’s - // TLSClientConfig must be nil (or the client will try using HTTPS over the PF_UNIX socket - // regardless of the values in the *tls.Config), and we would have to call sockets.ConfigureTransport. + opts := []dockerclient.Opt{ + dockerclient.WithHost(host), + dockerclient.WithVersion(defaultAPIVersion), + } + + // We conditionalize building the TLS configuration only to TLS sockets: + // + // The dockerclient.Client implementation differentiates between + // - Client.proto, which is ~how the connection is establishe (IP / AF_UNIX/Windows) + // - Client.scheme, which is what is sent over the connection (HTTP with/without TLS). + // + // Only Client.proto is set from the URL in dockerclient.WithHost(), + // Client.scheme is detected based on a http.Client.TLSClientConfig presence; + // dockerclient.WithHTTPClient with a client that has TLSClientConfig set + // will, by default, trigger an attempt to use TLS. + // + // So, don’t use WithHTTPClient for unix:// sockets at all. // - // We don't really want to configure anything for unix:// sockets, so just pass a nil *http.Client. + // Similarly, if we want to communicate over plain HTTP on a TCP socket (http://), + // we also should not set TLSClientConfig. We continue to use WithHTTPClient + // with our slightly non-default settings to avoid a behavior change on updates of c/image. // - // Similarly, if we want to communicate over plain HTTP on a TCP socket, we also need to set - // TLSClientConfig to nil. This can be achieved by using the form `http://` + // Alternatively we could use dockerclient.WithScheme to drive the TLS/non-TLS logic + // explicitly, but we would still want to set WithHTTPClient (differently) for https:// and http:// ; + // so that would not be any simpler. serverURL, err := dockerclient.ParseHostURL(host) if err != nil { return nil, err } - var httpClient *http.Client - if serverURL.Scheme != "unix" { - if serverURL.Scheme == "http" { - httpClient = httpConfig() - } else { - hc, err := tlsConfig(sys) - if err != nil { - return nil, err - } - httpClient = hc + switch serverURL.Scheme { + case "unix": // Nothing + case "http": + hc := httpConfig() + opts = append(opts, dockerclient.WithHTTPClient(hc)) + default: + hc, err := tlsConfig(sys) + if err != nil { + return nil, err } + opts = append(opts, dockerclient.WithHTTPClient(hc)) } - return dockerclient.NewClient(host, defaultAPIVersion, httpClient, nil) + return dockerclient.NewClientWithOpts(opts...) 
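The daemon client now uses dockerclient.NewClientWithOpts, the functional-options constructor, instead of positional arguments. The shape of that pattern in miniature (the types and names below are stand-ins, not the docker client API):

package main

import "fmt"

// The functional-options pattern the daemon client switched to:
// each opt mutates the client under construction and may fail.
type client struct {
	host    string
	version string
}

type opt func(*client) error

func withHost(h string) opt    { return func(c *client) error { c.host = h; return nil } }
func withVersion(v string) opt { return func(c *client) error { c.version = v; return nil } }

func newClientWithOpts(opts ...opt) (*client, error) {
	c := &client{}
	for _, o := range opts {
		if err := o(c); err != nil {
			return nil, err
		}
	}
	return c, nil
}

func main() {
	c, _ := newClientWithOpts(withHost("unix:///run/docker.sock"), withVersion("1.41"))
	fmt.Printf("%+v\n", *c)
}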
} func tlsConfig(sys *types.SystemContext) (*http.Client, error) { diff --git a/vendor/github.com/containers/image/v5/docker/docker_client.go b/vendor/github.com/containers/image/v5/docker/docker_client.go index 570cca483..dd9127c5a 100644 --- a/vendor/github.com/containers/image/v5/docker/docker_client.go +++ b/vendor/github.com/containers/image/v5/docker/docker_client.go @@ -161,17 +161,6 @@ func newBearerTokenFromJSONBlob(blob []byte) (*bearerToken, error) { return token, nil } -// this is cloned from docker/go-connections because upstream docker has changed -// it and make deps here fails otherwise. -// We'll drop this once we upgrade to docker 1.13.x deps. -func serverDefault() *tls.Config { - return &tls.Config{ - // Avoid fallback to SSL protocols < TLS1.0 - MinVersion: tls.VersionTLS10, - CipherSuites: tlsconfig.DefaultServerAcceptedCiphers, - } -} - // dockerCertDir returns a path to a directory to be consumed by tlsclientconfig.SetupCertificates() depending on ctx and hostPort. func dockerCertDir(sys *types.SystemContext, hostPort string) (string, error) { if sys != nil && sys.DockerCertPath != "" { @@ -254,7 +243,9 @@ func newDockerClient(sys *types.SystemContext, registry, reference string) (*doc if registry == dockerHostname { registry = dockerRegistry } - tlsClientConfig := serverDefault() + tlsClientConfig := &tls.Config{ + CipherSuites: tlsconfig.DefaultServerAcceptedCiphers, + } // It is undefined whether the host[:port] string for dockerHostname should be dockerHostname or dockerRegistry, // because docker/docker does not read the certs.d subdirectory at all in that case. We use the user-visible diff --git a/vendor/github.com/containers/image/v5/internal/image/docker_schema2.go b/vendor/github.com/containers/image/v5/internal/image/docker_schema2.go index 15c9c2279..c3234c377 100644 --- a/vendor/github.com/containers/image/v5/internal/image/docker_schema2.go +++ b/vendor/github.com/containers/image/v5/internal/image/docker_schema2.go @@ -226,9 +226,9 @@ func (m *manifestSchema2) convertToManifestOCI1(ctx context.Context, _ *types.Ma layers[idx] = oci1DescriptorFromSchema2Descriptor(m.m.LayersDescriptors[idx]) switch m.m.LayersDescriptors[idx].MediaType { case manifest.DockerV2Schema2ForeignLayerMediaType: - layers[idx].MediaType = imgspecv1.MediaTypeImageLayerNonDistributable + layers[idx].MediaType = imgspecv1.MediaTypeImageLayerNonDistributable //nolint:staticcheck // NonDistributable layers are deprecated, but we want to continue to support manipulating pre-existing images. case manifest.DockerV2Schema2ForeignLayerMediaTypeGzip: - layers[idx].MediaType = imgspecv1.MediaTypeImageLayerNonDistributableGzip + layers[idx].MediaType = imgspecv1.MediaTypeImageLayerNonDistributableGzip //nolint:staticcheck // NonDistributable layers are deprecated, but we want to continue to support manipulating pre-existing images. 
case manifest.DockerV2SchemaLayerMediaTypeUncompressed: layers[idx].MediaType = imgspecv1.MediaTypeImageLayer case manifest.DockerV2Schema2LayerMediaType: diff --git a/vendor/github.com/containers/image/v5/internal/image/oci.go b/vendor/github.com/containers/image/v5/internal/image/oci.go index 4b74de3e5..166daa0e8 100644 --- a/vendor/github.com/containers/image/v5/internal/image/oci.go +++ b/vendor/github.com/containers/image/v5/internal/image/oci.go @@ -215,11 +215,11 @@ func (m *manifestOCI1) convertToManifestSchema2(_ context.Context, _ *types.Mani for idx := range layers { layers[idx] = schema2DescriptorFromOCI1Descriptor(m.m.Layers[idx]) switch layers[idx].MediaType { - case imgspecv1.MediaTypeImageLayerNonDistributable: + case imgspecv1.MediaTypeImageLayerNonDistributable: //nolint:staticcheck // NonDistributable layers are deprecated, but we want to continue to support manipulating pre-existing images. layers[idx].MediaType = manifest.DockerV2Schema2ForeignLayerMediaType - case imgspecv1.MediaTypeImageLayerNonDistributableGzip: + case imgspecv1.MediaTypeImageLayerNonDistributableGzip: //nolint:staticcheck // NonDistributable layers are deprecated, but we want to continue to support manipulating pre-existing images. layers[idx].MediaType = manifest.DockerV2Schema2ForeignLayerMediaTypeGzip - case imgspecv1.MediaTypeImageLayerNonDistributableZstd: + case imgspecv1.MediaTypeImageLayerNonDistributableZstd: //nolint:staticcheck // NonDistributable layers are deprecated, but we want to continue to support manipulating pre-existing images. return nil, fmt.Errorf("Error during manifest conversion: %q: zstd compression is not supported for docker images", layers[idx].MediaType) case imgspecv1.MediaTypeImageLayer: layers[idx].MediaType = manifest.DockerV2SchemaLayerMediaTypeUncompressed diff --git a/vendor/github.com/containers/image/v5/internal/manifest/docker_schema2_list.go b/vendor/github.com/containers/image/v5/internal/manifest/docker_schema2_list.go index e98c5c99e..516ca7ac9 100644 --- a/vendor/github.com/containers/image/v5/internal/manifest/docker_schema2_list.go +++ b/vendor/github.com/containers/image/v5/internal/manifest/docker_schema2_list.go @@ -69,27 +69,71 @@ func (list *Schema2ListPublic) Instance(instanceDigest digest.Digest) (ListUpdat // UpdateInstances updates the sizes, digests, and media types of the manifests // which the list catalogs. 
-func (list *Schema2ListPublic) UpdateInstances(updates []ListUpdate) error { - if len(updates) != len(list.Manifests) { - return fmt.Errorf("incorrect number of update entries passed to Schema2List.UpdateInstances: expected %d, got %d", len(list.Manifests), len(updates)) +func (index *Schema2ListPublic) UpdateInstances(updates []ListUpdate) error { + editInstances := []ListEdit{} + for i, instance := range updates { + editInstances = append(editInstances, ListEdit{ + UpdateOldDigest: index.Manifests[i].Digest, + UpdateDigest: instance.Digest, + UpdateSize: instance.Size, + UpdateMediaType: instance.MediaType, + ListOperation: ListOpUpdate}) } - for i := range updates { - if err := updates[i].Digest.Validate(); err != nil { - return fmt.Errorf("update %d of %d passed to Schema2List.UpdateInstances contained an invalid digest: %w", i+1, len(updates), err) - } - list.Manifests[i].Digest = updates[i].Digest - if updates[i].Size < 0 { - return fmt.Errorf("update %d of %d passed to Schema2List.UpdateInstances had an invalid size (%d)", i+1, len(updates), updates[i].Size) - } - list.Manifests[i].Size = updates[i].Size - if updates[i].MediaType == "" { - return fmt.Errorf("update %d of %d passed to Schema2List.UpdateInstances had no media type (was %q)", i+1, len(updates), list.Manifests[i].MediaType) + return index.editInstances(editInstances) +} + +func (index *Schema2ListPublic) editInstances(editInstances []ListEdit) error { + addedEntries := []Schema2ManifestDescriptor{} + for i, editInstance := range editInstances { + switch editInstance.ListOperation { + case ListOpUpdate: + if err := editInstance.UpdateOldDigest.Validate(); err != nil { + return fmt.Errorf("Schema2List.EditInstances: Attempting to update %s which is an invalid digest: %w", editInstance.UpdateOldDigest, err) + } + if err := editInstance.UpdateDigest.Validate(); err != nil { + return fmt.Errorf("Schema2List.EditInstances: Modified digest %s is an invalid digest: %w", editInstance.UpdateDigest, err) + } + targetIndex := slices.IndexFunc(index.Manifests, func(m Schema2ManifestDescriptor) bool { + return m.Digest == editInstance.UpdateOldDigest + }) + if targetIndex == -1 { + return fmt.Errorf("Schema2List.EditInstances: digest %s not found", editInstance.UpdateOldDigest) + } + index.Manifests[targetIndex].Digest = editInstance.UpdateDigest + if editInstance.UpdateSize < 0 { + return fmt.Errorf("update %d of %d passed to Schema2List.UpdateInstances had an invalid size (%d)", i+1, len(editInstances), editInstance.UpdateSize) + } + index.Manifests[targetIndex].Size = editInstance.UpdateSize + if editInstance.UpdateMediaType == "" { + return fmt.Errorf("update %d of %d passed to Schema2List.UpdateInstances had no media type (was %q)", i+1, len(editInstances), index.Manifests[i].MediaType) + } + index.Manifests[targetIndex].MediaType = editInstance.UpdateMediaType + case ListOpAdd: + addInstance := Schema2ManifestDescriptor{ + Schema2Descriptor{Digest: editInstance.AddDigest, Size: editInstance.AddSize, MediaType: editInstance.AddMediaType}, + Schema2PlatformSpec{ + OS: editInstance.AddPlatform.OS, + Architecture: editInstance.AddPlatform.Architecture, + OSVersion: editInstance.AddPlatform.OSVersion, + OSFeatures: editInstance.AddPlatform.OSFeatures, + Variant: editInstance.AddPlatform.Variant, + }, + } + addedEntries = append(addedEntries, addInstance) + default: + return fmt.Errorf("internal error: invalid operation: %d", editInstance.ListOperation) } - list.Manifests[i].MediaType = updates[i].MediaType + } + if 
len(addedEntries) != 0 {
+		index.Manifests = append(index.Manifests, addedEntries...)
 	}
 	return nil
 }
 
+func (index *Schema2List) EditInstances(editInstances []ListEdit) error {
+	return index.editInstances(editInstances)
+}
+
 func (list *Schema2ListPublic) ChooseInstanceByCompression(ctx *types.SystemContext, preferGzip types.OptionalBool) (digest.Digest, error) {
 	// ChooseInstanceByCompression is same as ChooseInstance for schema2 manifest list.
 	return list.ChooseInstance(ctx)
diff --git a/vendor/github.com/containers/image/v5/internal/manifest/list.go b/vendor/github.com/containers/image/v5/internal/manifest/list.go
index 07c7d85f4..8786324ea 100644
--- a/vendor/github.com/containers/image/v5/internal/manifest/list.go
+++ b/vendor/github.com/containers/image/v5/internal/manifest/list.go
@@ -3,6 +3,7 @@ package manifest
 import (
 	"fmt"
 
+	compression "github.com/containers/image/v5/pkg/compression/types"
 	"github.com/containers/image/v5/types"
 	digest "github.com/opencontainers/go-digest"
 	imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1"
@@ -55,6 +56,10 @@ type List interface {
 	// SystemContext ( or for the current platform if the SystemContext doesn't specify any detail ) and preferGzip for compression which
 	// when configured to OptionalBoolTrue and chooses best available compression when it is OptionalBoolFalse or left OptionalBoolUndefined.
 	ChooseInstanceByCompression(ctx *types.SystemContext, preferGzip types.OptionalBool) (digest.Digest, error)
+	// EditInstances edits the list's instances. It takes a slice of ListEdit, where each
+	// element either updates or adds one instance; the operation is selected by that
+	// element's ListOperation field.
+	EditInstances([]ListEdit) error
 }
 
 // ListUpdate includes the fields which a List's UpdateInstances() method will modify.
@@ -65,6 +70,36 @@ type ListUpdate struct {
 	MediaType string
 }
 
+type ListOp int
+
+const (
+	listOpInvalid ListOp = iota
+	ListOpAdd
+	ListOpUpdate
+)
+
+// ListEdit includes the fields which a List's EditInstances() method will modify.
+type ListEdit struct {
+	ListOperation ListOp
+
+	// If ListOperation == ListOpUpdate (basically the previous UpdateInstances), all of these must be set.
+	UpdateOldDigest             digest.Digest
+	UpdateDigest                digest.Digest
+	UpdateSize                  int64
+	UpdateMediaType             string
+	UpdateAffectAnnotations     bool
+	UpdateAnnotations           map[string]string
+	UpdateCompressionAlgorithms []compression.Algorithm
+
+	// If ListOperation == ListOpAdd, all of these must be set.
+	AddDigest                digest.Digest
+	AddSize                  int64
+	AddMediaType             string
+	AddPlatform              *imgspecv1.Platform
+	AddAnnotations           map[string]string
+	AddCompressionAlgorithms []compression.Algorithm
+}
+
 // ListPublicFromBlob parses a list of manifests.
 // This is publicly visible as c/image/manifest.ListFromBlob.
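For orientation, a hypothetical caller of the new EditInstances API (this manifest package is internal to c/image, so outside code only reaches it through c/image wrappers; the digests and sizes below are made up):

package demo

import (
	manifest "github.com/containers/image/v5/internal/manifest"
	digest "github.com/opencontainers/go-digest"
	imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1"
)

func editList(index manifest.List, oldDigest, newDigest, extraDigest digest.Digest) error {
	// One in-place update plus one added instance, applied in a single call.
	return index.EditInstances([]manifest.ListEdit{
		{
			ListOperation:   manifest.ListOpUpdate,
			UpdateOldDigest: oldDigest, // existing instance, looked up by digest
			UpdateDigest:    newDigest,
			UpdateSize:      1234, // hypothetical blob size
			UpdateMediaType: imgspecv1.MediaTypeImageManifest,
		},
		{
			ListOperation: manifest.ListOpAdd,
			AddDigest:     extraDigest,
			AddSize:       5678, // hypothetical
			AddMediaType:  imgspecv1.MediaTypeImageManifest,
			AddPlatform:   &imgspecv1.Platform{OS: "linux", Architecture: "arm64"},
		},
	})
}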
func ListPublicFromBlob(manifest []byte, manifestMIMEType string) (ListPublic, error) {
diff --git a/vendor/github.com/containers/image/v5/internal/manifest/oci_index.go b/vendor/github.com/containers/image/v5/internal/manifest/oci_index.go
index 8e911678e..fd251d951 100644
--- a/vendor/github.com/containers/image/v5/internal/manifest/oci_index.go
+++ b/vendor/github.com/containers/image/v5/internal/manifest/oci_index.go
@@ -7,6 +7,7 @@ import (
 	"runtime"
 
 	platform "github.com/containers/image/v5/internal/pkg/platform"
+	compression "github.com/containers/image/v5/pkg/compression/types"
 	"github.com/containers/image/v5/types"
 	"github.com/opencontainers/go-digest"
 	imgspec "github.com/opencontainers/image-spec/specs-go"
@@ -22,7 +23,8 @@ const (
 	// That also suggests that this instance benefits from
 	// Zstd compression, so it can be preferred by compatible consumers over instances that
 	// use gzip, depending on their local policy.
-	OCI1InstanceAnnotationCompressionZSTD = "io.github.containers.compression.zstd"
+	OCI1InstanceAnnotationCompressionZSTD      = "io.github.containers.compression.zstd"
+	OCI1InstanceAnnotationCompressionZSTDValue = "true"
 )
 
 // OCI1IndexPublic is just an alias for the OCI index type, but one which we can
@@ -64,26 +66,102 @@ func (index *OCI1IndexPublic) Instance(instanceDigest digest.Digest) (ListUpdate
 // UpdateInstances updates the sizes, digests, and media types of the manifests
 // which the list catalogs.
 func (index *OCI1IndexPublic) UpdateInstances(updates []ListUpdate) error {
-	if len(updates) != len(index.Manifests) {
-		return fmt.Errorf("incorrect number of update entries passed to OCI1Index.UpdateInstances: expected %d, got %d", len(index.Manifests), len(updates))
+	editInstances := []ListEdit{}
+	for i, instance := range updates {
+		editInstances = append(editInstances, ListEdit{
+			UpdateOldDigest: index.Manifests[i].Digest,
+			UpdateDigest:    instance.Digest,
+			UpdateSize:      instance.Size,
+			UpdateMediaType: instance.MediaType,
+			ListOperation:   ListOpUpdate})
 	}
-	for i := range updates {
-		if err := updates[i].Digest.Validate(); err != nil {
-			return fmt.Errorf("update %d of %d passed to OCI1Index.UpdateInstances contained an invalid digest: %w", i+1, len(updates), err)
-		}
-		index.Manifests[i].Digest = updates[i].Digest
-		if updates[i].Size < 0 {
-			return fmt.Errorf("update %d of %d passed to OCI1Index.UpdateInstances had an invalid size (%d)", i+1, len(updates), updates[i].Size)
+	return index.editInstances(editInstances)
+}
+
+func addCompressionAnnotations(compressionAlgorithms []compression.Algorithm, annotationsMap map[string]string) {
+	// TODO: This should also delete the algorithm if the map already contains an algorithm and the
+	// compressionAlgorithms list has a different one. To do that, we would need to modify the callers
+	// to always provide a reliable and full compressionAlgorithms list.
+ for _, algo := range compressionAlgorithms { + switch algo.Name() { + case compression.ZstdAlgorithmName: + annotationsMap[OCI1InstanceAnnotationCompressionZSTD] = OCI1InstanceAnnotationCompressionZSTDValue + default: + continue } - index.Manifests[i].Size = updates[i].Size - if updates[i].MediaType == "" { - return fmt.Errorf("update %d of %d passed to OCI1Index.UpdateInstances had no media type (was %q)", i+1, len(updates), index.Manifests[i].MediaType) + } +} + +func (index *OCI1IndexPublic) editInstances(editInstances []ListEdit) error { + addedEntries := []imgspecv1.Descriptor{} + updatedAnnotations := false + for i, editInstance := range editInstances { + switch editInstance.ListOperation { + case ListOpUpdate: + if err := editInstance.UpdateOldDigest.Validate(); err != nil { + return fmt.Errorf("OCI1Index.EditInstances: Attempting to update %s which is an invalid digest: %w", editInstance.UpdateOldDigest, err) + } + if err := editInstance.UpdateDigest.Validate(); err != nil { + return fmt.Errorf("OCI1Index.EditInstances: Modified digest %s is an invalid digest: %w", editInstance.UpdateDigest, err) + } + targetIndex := slices.IndexFunc(index.Manifests, func(m imgspecv1.Descriptor) bool { + return m.Digest == editInstance.UpdateOldDigest + }) + if targetIndex == -1 { + return fmt.Errorf("OCI1Index.EditInstances: digest %s not found", editInstance.UpdateOldDigest) + } + index.Manifests[targetIndex].Digest = editInstance.UpdateDigest + if editInstance.UpdateSize < 0 { + return fmt.Errorf("update %d of %d passed to OCI1Index.UpdateInstances had an invalid size (%d)", i+1, len(editInstances), editInstance.UpdateSize) + } + index.Manifests[targetIndex].Size = editInstance.UpdateSize + if editInstance.UpdateMediaType == "" { + return fmt.Errorf("update %d of %d passed to OCI1Index.UpdateInstances had no media type (was %q)", i+1, len(editInstances), index.Manifests[i].MediaType) + } + index.Manifests[targetIndex].MediaType = editInstance.UpdateMediaType + if editInstance.UpdateAnnotations != nil { + updatedAnnotations = true + if editInstance.UpdateAffectAnnotations { + index.Manifests[targetIndex].Annotations = maps.Clone(editInstance.UpdateAnnotations) + } else { + if index.Manifests[targetIndex].Annotations == nil { + index.Manifests[targetIndex].Annotations = map[string]string{} + } + maps.Copy(index.Manifests[targetIndex].Annotations, editInstance.UpdateAnnotations) + } + } + addCompressionAnnotations(editInstance.UpdateCompressionAlgorithms, index.Manifests[targetIndex].Annotations) + case ListOpAdd: + annotations := map[string]string{} + if editInstance.AddAnnotations != nil { + annotations = maps.Clone(editInstance.AddAnnotations) + } + addCompressionAnnotations(editInstance.AddCompressionAlgorithms, annotations) + addedEntries = append(addedEntries, imgspecv1.Descriptor{ + MediaType: editInstance.AddMediaType, + Size: editInstance.AddSize, + Digest: editInstance.AddDigest, + Platform: editInstance.AddPlatform, + Annotations: annotations}) + default: + return fmt.Errorf("internal error: invalid operation: %d", editInstance.ListOperation) } - index.Manifests[i].MediaType = updates[i].MediaType + } + if len(addedEntries) != 0 { + index.Manifests = append(index.Manifests, addedEntries...) 
+ } + if len(addedEntries) != 0 || updatedAnnotations { + slices.SortStableFunc(index.Manifests, func(a, b imgspecv1.Descriptor) bool { + return !instanceIsZstd(a) && instanceIsZstd(b) + }) } return nil } +func (index *OCI1Index) EditInstances(editInstances []ListEdit) error { + return index.editInstances(editInstances) +} + // instanceIsZstd returns true if instance is a zstd instance otherwise false. func instanceIsZstd(manifest imgspecv1.Descriptor) bool { if value, ok := manifest.Annotations[OCI1InstanceAnnotationCompressionZSTD]; ok && value == "true" { @@ -131,24 +209,20 @@ func (index *OCI1IndexPublic) chooseInstance(ctx *types.SystemContext, preferGzi for manifestIndex, d := range index.Manifests { candidate := instanceCandidate{platformIndex: math.MaxInt, manifestPosition: manifestIndex, isZstd: instanceIsZstd(d), digest: d.Digest} if d.Platform != nil { - foundPlatform := false - for platformIndex, wantedPlatform := range wantedPlatforms { - imagePlatform := imgspecv1.Platform{ - Architecture: d.Platform.Architecture, - OS: d.Platform.OS, - OSVersion: d.Platform.OSVersion, - OSFeatures: slices.Clone(d.Platform.OSFeatures), - Variant: d.Platform.Variant, - } - if platform.MatchesPlatform(imagePlatform, wantedPlatform) { - foundPlatform = true - candidate.platformIndex = platformIndex - break - } + imagePlatform := imgspecv1.Platform{ + Architecture: d.Platform.Architecture, + OS: d.Platform.OS, + OSVersion: d.Platform.OSVersion, + OSFeatures: slices.Clone(d.Platform.OSFeatures), + Variant: d.Platform.Variant, } - if !foundPlatform { + platformIndex := slices.IndexFunc(wantedPlatforms, func(wantedPlatform imgspecv1.Platform) bool { + return platform.MatchesPlatform(imagePlatform, wantedPlatform) + }) + if platformIndex == -1 { continue } + candidate.platformIndex = platformIndex } if bestMatch == nil || candidate.isPreferredOver(bestMatch, didPreferGzip) { bestMatch = &candidate diff --git a/vendor/github.com/containers/image/v5/internal/set/set.go b/vendor/github.com/containers/image/v5/internal/set/set.go index 5c7bcabef..3e777fe12 100644 --- a/vendor/github.com/containers/image/v5/internal/set/set.go +++ b/vendor/github.com/containers/image/v5/internal/set/set.go @@ -24,11 +24,11 @@ func NewWithValues[E comparable](values ...E) *Set[E] { return s } -func (s Set[E]) Add(v E) { +func (s *Set[E]) Add(v E) { s.m[v] = struct{}{} // Possibly writing the same struct{}{} presence marker again. } -func (s Set[E]) Delete(v E) { +func (s *Set[E]) Delete(v E) { delete(s.m, v) } diff --git a/vendor/github.com/containers/image/v5/manifest/oci.go b/vendor/github.com/containers/image/v5/manifest/oci.go index eb2354768..a70470d99 100644 --- a/vendor/github.com/containers/image/v5/manifest/oci.go +++ b/vendor/github.com/containers/image/v5/manifest/oci.go @@ -42,7 +42,12 @@ type OCI1 struct { // useful for validation anyway. 
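The zstd-last ordering applied in editInstances above is a stable boolean partition: the less function returns true only for (non-zstd, zstd) pairs, so entries keep their relative order within each group while zstd instances sink to the back. A standalone sketch of the same trick, using the bool-based x/exp/slices API vendored here (newer versions of that package take a cmp function instead):

package main

import (
	"fmt"

	"golang.org/x/exp/slices"
)

func main() {
	type instance struct {
		name string
		zstd bool
	}
	list := []instance{
		{"a-zstd", true}, {"b-gzip", false}, {"c-zstd", true}, {"d-gzip", false},
	}
	// false sorts before true; stability preserves order within each group.
	slices.SortStableFunc(list, func(a, b instance) bool {
		return !a.zstd && b.zstd
	})
	fmt.Println(list) // [{b-gzip false} {d-gzip false} {a-zstd true} {c-zstd true}]
}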
func SupportedOCI1MediaType(m string) error { switch m { - case imgspecv1.MediaTypeDescriptor, imgspecv1.MediaTypeImageConfig, imgspecv1.MediaTypeImageLayer, imgspecv1.MediaTypeImageLayerGzip, imgspecv1.MediaTypeImageLayerNonDistributable, imgspecv1.MediaTypeImageLayerNonDistributableGzip, imgspecv1.MediaTypeImageLayerNonDistributableZstd, imgspecv1.MediaTypeImageLayerZstd, imgspecv1.MediaTypeImageManifest, imgspecv1.MediaTypeLayoutHeader, ociencspec.MediaTypeLayerEnc, ociencspec.MediaTypeLayerGzipEnc: + case imgspecv1.MediaTypeDescriptor, imgspecv1.MediaTypeImageConfig, + imgspecv1.MediaTypeImageLayer, imgspecv1.MediaTypeImageLayerGzip, imgspecv1.MediaTypeImageLayerZstd, + imgspecv1.MediaTypeImageLayerNonDistributable, imgspecv1.MediaTypeImageLayerNonDistributableGzip, imgspecv1.MediaTypeImageLayerNonDistributableZstd, //nolint:staticcheck // NonDistributable layers are deprecated, but we want to continue to support manipulating pre-existing images. + imgspecv1.MediaTypeImageManifest, + imgspecv1.MediaTypeLayoutHeader, + ociencspec.MediaTypeLayerEnc, ociencspec.MediaTypeLayerGzipEnc: return nil default: return fmt.Errorf("unsupported OCIv1 media type: %q", m) @@ -102,9 +107,9 @@ func (m *OCI1) LayerInfos() []LayerInfo { var oci1CompressionMIMETypeSets = []compressionMIMETypeSet{ { - mtsUncompressed: imgspecv1.MediaTypeImageLayerNonDistributable, - compressiontypes.GzipAlgorithmName: imgspecv1.MediaTypeImageLayerNonDistributableGzip, - compressiontypes.ZstdAlgorithmName: imgspecv1.MediaTypeImageLayerNonDistributableZstd, + mtsUncompressed: imgspecv1.MediaTypeImageLayerNonDistributable, //nolint:staticcheck // NonDistributable layers are deprecated, but we want to continue to support manipulating pre-existing images. + compressiontypes.GzipAlgorithmName: imgspecv1.MediaTypeImageLayerNonDistributableGzip, //nolint:staticcheck // NonDistributable layers are deprecated, but we want to continue to support manipulating pre-existing images. + compressiontypes.ZstdAlgorithmName: imgspecv1.MediaTypeImageLayerNonDistributableZstd, //nolint:staticcheck // NonDistributable layers are deprecated, but we want to continue to support manipulating pre-existing images. }, { mtsUncompressed: imgspecv1.MediaTypeImageLayer, @@ -166,7 +171,8 @@ func getEncryptedMediaType(mediatype string) (string, error) { } unsuffixedMediatype := strings.Split(mediatype, "+")[0] switch unsuffixedMediatype { - case DockerV2Schema2LayerMediaType, imgspecv1.MediaTypeImageLayer, imgspecv1.MediaTypeImageLayerNonDistributable: + case DockerV2Schema2LayerMediaType, imgspecv1.MediaTypeImageLayer, + imgspecv1.MediaTypeImageLayerNonDistributable: //nolint:staticcheck // NonDistributable layers are deprecated, but we want to continue to support manipulating pre-existing images. 
return mediatype + "+encrypted", nil } diff --git a/vendor/github.com/containers/image/v5/oci/layout/oci_transport.go b/vendor/github.com/containers/image/v5/oci/layout/oci_transport.go index 4a4ab9b2c..6586b8440 100644 --- a/vendor/github.com/containers/image/v5/oci/layout/oci_transport.go +++ b/vendor/github.com/containers/image/v5/oci/layout/oci_transport.go @@ -188,14 +188,18 @@ func (ref ociReference) getManifestDescriptor() (imgspecv1.Descriptor, error) { return index.Manifests[0], nil } else { // if image specified, look through all manifests for a match + var unsupportedMIMETypes []string for _, md := range index.Manifests { - if md.MediaType != imgspecv1.MediaTypeImageManifest && md.MediaType != imgspecv1.MediaTypeImageIndex { - continue - } if refName, ok := md.Annotations[imgspecv1.AnnotationRefName]; ok && refName == ref.image { - return md, nil + if md.MediaType == imgspecv1.MediaTypeImageManifest || md.MediaType == imgspecv1.MediaTypeImageIndex { + return md, nil + } + unsupportedMIMETypes = append(unsupportedMIMETypes, md.MediaType) } } + if len(unsupportedMIMETypes) != 0 { + return imgspecv1.Descriptor{}, fmt.Errorf("reference %q matches unsupported manifest MIME types %q", ref.image, unsupportedMIMETypes) + } } return imgspecv1.Descriptor{}, ImageNotFoundError{ref} } diff --git a/vendor/github.com/containers/image/v5/openshift/openshift-copies.go b/vendor/github.com/containers/image/v5/openshift/openshift-copies.go index 0b737f020..c6498f6ca 100644 --- a/vendor/github.com/containers/image/v5/openshift/openshift-copies.go +++ b/vendor/github.com/containers/image/v5/openshift/openshift-copies.go @@ -17,8 +17,8 @@ import ( "strings" "time" + "dario.cat/mergo" "github.com/containers/storage/pkg/homedir" - "github.com/imdario/mergo" "github.com/sirupsen/logrus" "golang.org/x/exp/slices" "gopkg.in/yaml.v3" @@ -957,8 +957,6 @@ func tlsConfigFor(c *restConfig) (*tls.Config, error) { } tlsConfig := &tls.Config{ - // Change default from SSLv3 to TLSv1.0 (because of POODLE vulnerability) - MinVersion: tls.VersionTLS10, InsecureSkipVerify: c.Insecure, } diff --git a/vendor/github.com/containers/image/v5/pkg/docker/config/config.go b/vendor/github.com/containers/image/v5/pkg/docker/config/config.go index 0e3003cec..2e79d0ffb 100644 --- a/vendor/github.com/containers/image/v5/pkg/docker/config/config.go +++ b/vendor/github.com/containers/image/v5/pkg/docker/config/config.go @@ -48,9 +48,9 @@ var ( ErrNotSupported = errors.New("not supported") ) -// authPath combines a path to a file with container registry access keys, -// along with expected properties of that path (currently just whether it's) -// legacy format or not. +// authPath combines a path to a file with container registry credentials, +// along with expected properties of that path (currently just whether it's +// legacy format or not). type authPath struct { path string legacyFormat bool @@ -87,12 +87,12 @@ func SetCredentials(sys *types.SystemContext, key, username, password string) (s switch helper { // Special-case the built-in helpers for auth files. 
case sysregistriesv2.AuthenticationFileHelper: - desc, err = modifyJSON(sys, func(auths *dockerConfigFile) (bool, string, error) { - if ch, exists := auths.CredHelpers[key]; exists { + desc, err = modifyJSON(sys, func(fileContents *dockerConfigFile) (bool, string, error) { + if ch, exists := fileContents.CredHelpers[key]; exists { if isNamespaced { return false, "", unsupportedNamespaceErr(ch) } - desc, err := setAuthToCredHelper(ch, key, username, password) + desc, err := setCredsInCredHelper(ch, key, username, password) if err != nil { return false, "", err } @@ -100,7 +100,7 @@ func SetCredentials(sys *types.SystemContext, key, username, password string) (s } creds := base64.StdEncoding.EncodeToString([]byte(username + ":" + password)) newCreds := dockerAuthConfig{Auth: creds} - auths.AuthConfigs[key] = newCreds + fileContents.AuthConfigs[key] = newCreds return true, "", nil }) // External helpers. @@ -108,7 +108,7 @@ func SetCredentials(sys *types.SystemContext, key, username, password string) (s if isNamespaced { err = unsupportedNamespaceErr(helper) } else { - desc, err = setAuthToCredHelper(helper, key, username, password) + desc, err = setCredsInCredHelper(helper, key, username, password) } } if err != nil { @@ -156,17 +156,17 @@ func GetAllCredentials(sys *types.SystemContext) (map[string]types.DockerAuthCon case sysregistriesv2.AuthenticationFileHelper: for _, path := range getAuthFilePaths(sys, homedir.Get()) { // parse returns an empty map in case the path doesn't exist. - auths, err := path.parse() + fileContents, err := path.parse() if err != nil { return nil, fmt.Errorf("reading JSON file %q: %w", path.path, err) } // Credential helpers in the auth file have a // direct mapping to a registry, so we can just // walk the map. - for registry := range auths.CredHelpers { + for registry := range fileContents.CredHelpers { allKeys.Add(registry) } - for key := range auths.AuthConfigs { + for key := range fileContents.AuthConfigs { key := normalizeAuthFileKey(key, path.legacyFormat) if key == normalizedDockerIORegistry { key = "docker.io" @@ -176,7 +176,7 @@ func GetAllCredentials(sys *types.SystemContext) (map[string]types.DockerAuthCon } // External helpers. default: - creds, err := listAuthsFromCredHelper(helper) + creds, err := listCredsInCredHelper(helper) if err != nil { logrus.Debugf("Error listing credentials stored in credential helper %s: %v", helper, err) if errors.Is(err, exec.ErrNotFound) { @@ -193,19 +193,19 @@ func GetAllCredentials(sys *types.SystemContext) (map[string]types.DockerAuthCon // Now use `GetCredentials` to the specific auth configs for each // previously listed registry. - authConfigs := make(map[string]types.DockerAuthConfig) + allCreds := make(map[string]types.DockerAuthConfig) for _, key := range allKeys.Values() { - authConf, err := GetCredentials(sys, key) + creds, err := GetCredentials(sys, key) if err != nil { // Note: we rely on the logging in `GetCredentials`. return nil, err } - if authConf != (types.DockerAuthConfig{}) { - authConfigs[key] = authConf + if creds != (types.DockerAuthConfig{}) { + allCreds[key] = creds } } - return authConfigs, nil + return allCreds, nil } // getAuthFilePaths returns a slice of authPaths based on the system context @@ -285,13 +285,13 @@ func getCredentialsWithHomeDir(sys *types.SystemContext, key, homeDir string) (t // Anonymous function to query credentials from auth files. 
getCredentialsFromAuthFiles := func() (types.DockerAuthConfig, string, error) { for _, path := range getAuthFilePaths(sys, homeDir) { - authConfig, err := findCredentialsInFile(key, registry, path) + creds, err := findCredentialsInFile(key, registry, path) if err != nil { return types.DockerAuthConfig{}, "", err } - if authConfig != (types.DockerAuthConfig{}) { - return authConfig, path.path, nil + if creds != (types.DockerAuthConfig{}) { + return creds, path.path, nil } } return types.DockerAuthConfig{}, "", nil @@ -320,7 +320,7 @@ func getCredentialsWithHomeDir(sys *types.SystemContext, key, homeDir string) (t // This intentionally uses "registry", not "key"; we don't support namespaced // credentials in helpers, but a "registry" is a valid parent of "key". helperKey = registry - creds, err = getAuthFromCredHelper(helper, registry) + creds, err = getCredsFromCredHelper(helper, registry) } if err != nil { logrus.Debugf("Error looking up credentials for %s in credential helper %s: %v", helperKey, helper, err) @@ -360,14 +360,14 @@ func GetAuthentication(sys *types.SystemContext, key string) (string, string, er // getAuthenticationWithHomeDir is an internal implementation detail of GetAuthentication, // it exists only to allow testing it with an artificial home directory. func getAuthenticationWithHomeDir(sys *types.SystemContext, key, homeDir string) (string, string, error) { - auth, err := getCredentialsWithHomeDir(sys, key, homeDir) + creds, err := getCredentialsWithHomeDir(sys, key, homeDir) if err != nil { return "", "", err } - if auth.IdentityToken != "" { + if creds.IdentityToken != "" { return "", "", fmt.Errorf("non-empty identity token found and this API doesn't support it: %w", ErrNotSupported) } - return auth.Username, auth.Password, nil + return creds.Username, creds.Password, nil } // RemoveAuthentication removes credentials for `key` from all possible @@ -393,7 +393,7 @@ func RemoveAuthentication(sys *types.SystemContext, key string) error { logrus.Debugf("Not removing credentials because namespaced keys are not supported for the credential helper: %s", helper) return } - err := deleteAuthFromCredHelper(helper, key) + err := deleteCredsFromCredHelper(helper, key) if err == nil { logrus.Debugf("Credentials for %q were deleted from credential helper %s", key, helper) isLoggedIn = true @@ -411,13 +411,13 @@ func RemoveAuthentication(sys *types.SystemContext, key string) error { switch helper { // Special-case the built-in helper for auth files. case sysregistriesv2.AuthenticationFileHelper: - _, err = modifyJSON(sys, func(auths *dockerConfigFile) (bool, string, error) { - if innerHelper, exists := auths.CredHelpers[key]; exists { + _, err = modifyJSON(sys, func(fileContents *dockerConfigFile) (bool, string, error) { + if innerHelper, exists := fileContents.CredHelpers[key]; exists { removeFromCredHelper(innerHelper) } - if _, ok := auths.AuthConfigs[key]; ok { + if _, ok := fileContents.AuthConfigs[key]; ok { isLoggedIn = true - delete(auths.AuthConfigs, key) + delete(fileContents.AuthConfigs, key) } return true, "", multiErr }) @@ -454,23 +454,23 @@ func RemoveAllAuthentication(sys *types.SystemContext) error { switch helper { // Special-case the built-in helper for auth files. 
case sysregistriesv2.AuthenticationFileHelper: - _, err = modifyJSON(sys, func(auths *dockerConfigFile) (bool, string, error) { - for registry, helper := range auths.CredHelpers { + _, err = modifyJSON(sys, func(fileContents *dockerConfigFile) (bool, string, error) { + for registry, helper := range fileContents.CredHelpers { // Helpers in auth files are expected // to exist, so no special treatment // for them. - if err := deleteAuthFromCredHelper(helper, registry); err != nil { + if err := deleteCredsFromCredHelper(helper, registry); err != nil { return false, "", err } } - auths.CredHelpers = make(map[string]string) - auths.AuthConfigs = make(map[string]dockerAuthConfig) + fileContents.CredHelpers = make(map[string]string) + fileContents.AuthConfigs = make(map[string]dockerAuthConfig) return true, "", nil }) // External helpers. default: var creds map[string]string - creds, err = listAuthsFromCredHelper(helper) + creds, err = listCredsInCredHelper(helper) if err != nil { if errors.Is(err, exec.ErrNotFound) { // It's okay if the helper doesn't exist. @@ -480,7 +480,7 @@ func RemoveAllAuthentication(sys *types.SystemContext) error { } } for registry := range creds { - err = deleteAuthFromCredHelper(helper, registry) + err = deleteCredsFromCredHelper(helper, registry) if err != nil { break } @@ -497,7 +497,7 @@ func RemoveAllAuthentication(sys *types.SystemContext) error { return multiErr } -func listAuthsFromCredHelper(credHelper string) (map[string]string, error) { +func listCredsInCredHelper(credHelper string) (map[string]string, error) { helperName := fmt.Sprintf("docker-credential-%s", credHelper) p := helperclient.NewShellProgramFunc(helperName) return helperclient.List(p) @@ -543,40 +543,40 @@ func getPathToAuthWithOS(sys *types.SystemContext, goOS string) (authPath, bool, return newAuthPathDefault(fmt.Sprintf(defaultPerUIDPathFormat, os.Getuid())), false, nil } -// parse unmarshals the authentications stored in the auth.json file and returns it +// parse unmarshals the credentials stored in the auth.json file and returns it // or returns an empty dockerConfigFile data structure if auth.json does not exist // if the file exists and is empty, this function returns an error. 
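Before the implementation below, a sketch of the two on-disk shapes parse() distinguishes; the struct names here only mirror the package's unexported types, and the credentials are hypothetical ("dXNlcjpwYXNz" is base64 of "user:pass"):

package main

import (
	"encoding/json"
	"fmt"
)

// Illustrative mirrors of the unexported dockerConfigFile/dockerAuthConfig shapes.
type authConfig struct {
	Auth string `json:"auth,omitempty"`
}

type configFile struct {
	AuthConfigs map[string]authConfig `json:"auths"`
	CredHelpers map[string]string     `json:"credHelpers,omitempty"`
}

func main() {
	// New format: entries nested under "auths", optionally with credHelpers.
	const newFormat = `{"auths": {"quay.io": {"auth": "dXNlcjpwYXNz"}}, "credHelpers": {"registry.example.com": "secretservice"}}`
	var cf configFile
	if err := json.Unmarshal([]byte(newFormat), &cf); err != nil {
		panic(err)
	}
	fmt.Println(cf.AuthConfigs["quay.io"].Auth, cf.CredHelpers["registry.example.com"])

	// Legacy .dockercfg: the same per-registry entries at the top level,
	// which parse() unmarshals straight into AuthConfigs.
	const legacyFormat = `{"quay.io": {"auth": "dXNlcjpwYXNz"}}`
	var legacy map[string]authConfig
	if err := json.Unmarshal([]byte(legacyFormat), &legacy); err != nil {
		panic(err)
	}
	fmt.Println(legacy["quay.io"].Auth)
}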
func (path authPath) parse() (dockerConfigFile, error) { - var auths dockerConfigFile + var fileContents dockerConfigFile raw, err := os.ReadFile(path.path) if err != nil { if os.IsNotExist(err) { - auths.AuthConfigs = map[string]dockerAuthConfig{} - return auths, nil + fileContents.AuthConfigs = map[string]dockerAuthConfig{} + return fileContents, nil } return dockerConfigFile{}, err } if path.legacyFormat { - if err = json.Unmarshal(raw, &auths.AuthConfigs); err != nil { + if err = json.Unmarshal(raw, &fileContents.AuthConfigs); err != nil { return dockerConfigFile{}, fmt.Errorf("unmarshaling JSON at %q: %w", path.path, err) } - return auths, nil + return fileContents, nil } - if err = json.Unmarshal(raw, &auths); err != nil { + if err = json.Unmarshal(raw, &fileContents); err != nil { return dockerConfigFile{}, fmt.Errorf("unmarshaling JSON at %q: %w", path.path, err) } - if auths.AuthConfigs == nil { - auths.AuthConfigs = map[string]dockerAuthConfig{} + if fileContents.AuthConfigs == nil { + fileContents.AuthConfigs = map[string]dockerAuthConfig{} } - if auths.CredHelpers == nil { - auths.CredHelpers = make(map[string]string) + if fileContents.CredHelpers == nil { + fileContents.CredHelpers = make(map[string]string) } - return auths, nil + return fileContents, nil } // modifyJSON finds an auth.json file, calls editor on the contents, and @@ -585,7 +585,7 @@ func (path authPath) parse() (dockerConfigFile, error) { // // The editor may also return a human-readable description of the updated location; if it is "", // the file itself is used. -func modifyJSON(sys *types.SystemContext, editor func(auths *dockerConfigFile) (bool, string, error)) (string, error) { +func modifyJSON(sys *types.SystemContext, editor func(fileContents *dockerConfigFile) (bool, string, error)) (string, error) { path, _, err := getPathToAuth(sys) if err != nil { return "", err @@ -599,17 +599,17 @@ func modifyJSON(sys *types.SystemContext, editor func(auths *dockerConfigFile) ( return "", err } - auths, err := path.parse() + fileContents, err := path.parse() if err != nil { return "", fmt.Errorf("reading JSON file %q: %w", path.path, err) } - updated, description, err := editor(&auths) + updated, description, err := editor(&fileContents) if err != nil { return "", fmt.Errorf("updating %q: %w", path.path, err) } if updated { - newData, err := json.MarshalIndent(auths, "", "\t") + newData, err := json.MarshalIndent(fileContents, "", "\t") if err != nil { return "", fmt.Errorf("marshaling JSON %q: %w", path.path, err) } @@ -625,7 +625,7 @@ func modifyJSON(sys *types.SystemContext, editor func(auths *dockerConfigFile) ( return description, nil } -func getAuthFromCredHelper(credHelper, registry string) (types.DockerAuthConfig, error) { +func getCredsFromCredHelper(credHelper, registry string) (types.DockerAuthConfig, error) { helperName := fmt.Sprintf("docker-credential-%s", credHelper) p := helperclient.NewShellProgramFunc(helperName) creds, err := helperclient.Get(p, registry) @@ -650,9 +650,9 @@ func getAuthFromCredHelper(credHelper, registry string) (types.DockerAuthConfig, } } -// setAuthToCredHelper stores (username, password) for registry in credHelper. +// setCredsInCredHelper stores (username, password) for registry in credHelper. // Returns a human-readable description of the destination, to be returned by SetCredentials. 
-func setAuthToCredHelper(credHelper, registry, username, password string) (string, error) { +func setCredsInCredHelper(credHelper, registry, username, password string) (string, error) { helperName := fmt.Sprintf("docker-credential-%s", credHelper) p := helperclient.NewShellProgramFunc(helperName) creds := &credentials.Credentials{ @@ -666,7 +666,7 @@ func setAuthToCredHelper(credHelper, registry, username, password string) (strin return fmt.Sprintf("credential helper: %s", credHelper), nil } -func deleteAuthFromCredHelper(credHelper, registry string) error { +func deleteCredsFromCredHelper(credHelper, registry string) error { helperName := fmt.Sprintf("docker-credential-%s", credHelper) p := helperclient.NewShellProgramFunc(helperName) return helperclient.Erase(p, registry) @@ -675,7 +675,7 @@ func deleteAuthFromCredHelper(credHelper, registry string) error { // findCredentialsInFile looks for credentials matching "key" // (which is "registry" or a namespace in "registry") in "path". func findCredentialsInFile(key, registry string, path authPath) (types.DockerAuthConfig, error) { - auths, err := path.parse() + fileContents, err := path.parse() if err != nil { return types.DockerAuthConfig{}, fmt.Errorf("reading JSON file %q: %w", path.path, err) } @@ -683,9 +683,9 @@ func findCredentialsInFile(key, registry string, path authPath) (types.DockerAut // First try cred helpers. They should always be normalized. // This intentionally uses "registry", not "key"; we don't support namespaced // credentials in helpers. - if ch, exists := auths.CredHelpers[registry]; exists { + if ch, exists := fileContents.CredHelpers[registry]; exists { logrus.Debugf("Looking up in credential helper %s based on credHelpers entry in %s", ch, path.path) - return getAuthFromCredHelper(ch, registry) + return getCredsFromCredHelper(ch, registry) } // Support sub-registry namespaces in auth. @@ -701,7 +701,7 @@ func findCredentialsInFile(key, registry string, path authPath) (types.DockerAut // Repo or namespace keys are only supported as exact matches. For registry // keys we prefer exact matches as well. for _, key := range keys { - if val, exists := auths.AuthConfigs[key]; exists { + if val, exists := fileContents.AuthConfigs[key]; exists { return decodeDockerAuth(path.path, key, val) } } @@ -715,7 +715,7 @@ func findCredentialsInFile(key, registry string, path authPath) (types.DockerAut // The docker.io registry still uses the /v1/ key with a special host name, // so account for that as well. 
 	registry = normalizeRegistry(registry)
-	for k, v := range auths.AuthConfigs {
+	for k, v := range fileContents.AuthConfigs {
 		if normalizeAuthFileKey(k, path.legacyFormat) == registry {
 			return decodeDockerAuth(path.path, k, v)
 		}
diff --git a/vendor/github.com/containers/image/v5/sif/src.go b/vendor/github.com/containers/image/v5/sif/src.go
index b645f80dd..1f6ab7f3b 100644
--- a/vendor/github.com/containers/image/v5/sif/src.go
+++ b/vendor/github.com/containers/image/v5/sif/src.go
@@ -96,9 +96,11 @@ func newImageSource(ctx context.Context, sys *types.SystemContext, ref sifRefere
 	created := sifImg.ModifiedAt()
 	config := imgspecv1.Image{
-		Created:      &created,
-		Architecture: sifImg.PrimaryArch(),
-		OS:           "linux",
+		Created: &created,
+		Platform: imgspecv1.Platform{
+			Architecture: sifImg.PrimaryArch(),
+			OS:           "linux",
+		},
 		Config: imgspecv1.ImageConfig{
 			Cmd: commandLine,
 		},
@@ -180,7 +182,7 @@ func (s *sifImageSource) Close() error {
 func (s *sifImageSource) GetBlob(ctx context.Context, info types.BlobInfo, cache types.BlobInfoCache) (io.ReadCloser, int64, error) {
 	switch info.Digest {
 	case s.configDigest:
-		return io.NopCloser(bytes.NewBuffer(s.config)), int64(len(s.config)), nil
+		return io.NopCloser(bytes.NewReader(s.config)), int64(len(s.config)), nil
 	case s.layerDigest:
 		reader, err := os.Open(s.layerFile)
 		if err != nil {
diff --git a/vendor/github.com/containers/image/v5/tarball/tarball_src.go b/vendor/github.com/containers/image/v5/tarball/tarball_src.go
index 0fb3a8393..6f9bfaf75 100644
--- a/vendor/github.com/containers/image/v5/tarball/tarball_src.go
+++ b/vendor/github.com/containers/image/v5/tarball/tarball_src.go
@@ -19,7 +19,6 @@ import (
 	imgspecs "github.com/opencontainers/image-spec/specs-go"
 	imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1"
 	"golang.org/x/exp/maps"
-	"golang.org/x/exp/slices"
 )
 
 type tarballImageSource struct {
@@ -29,42 +28,46 @@ type tarballImageSource struct {
 	impl.DoesNotAffectLayerInfosForCopy
 	stubs.NoGetBlobAtInitialize
 
-	reference  tarballReference
-	filenames  []string
-	diffIDs    []digest.Digest
-	diffSizes  []int64
-	blobIDs    []digest.Digest
-	blobSizes  []int64
-	blobTypes  []string
-	config     []byte
-	configID   digest.Digest
-	configSize int64
-	manifest   []byte
+	reference tarballReference
+	blobs     map[digest.Digest]tarballBlob
+	manifest  []byte
+}
+
+// tarballBlob is a blob that tarballImageSource can return by GetBlob.
+type tarballBlob struct {
+	contents []byte // or nil to read from filename below
+	filename string // valid if contents == nil
+	size     int64
 }
 
 func (r *tarballReference) NewImageSource(ctx context.Context, sys *types.SystemContext) (types.ImageSource, error) {
-	// Gather up the digests, sizes, and date information for all of the files.
-	filenames := []string{}
+	// Pick up the layer comment from the configuration's history list, if one is set.
+	comment := "imported from tarball"
+	if len(r.config.History) > 0 && r.config.History[0].Comment != "" {
+		comment = r.config.History[0].Comment
+	}
+
+	// Gather up the digests, sizes, and history information for all of the files.
+ blobs := map[digest.Digest]tarballBlob{} diffIDs := []digest.Digest{} - diffSizes := []int64{} - blobIDs := []digest.Digest{} - blobSizes := []int64{} - blobTimes := []time.Time{} - blobTypes := []string{} + created := time.Time{} + history := []imgspecv1.History{} + layerDescriptors := []imgspecv1.Descriptor{} for _, filename := range r.filenames { - var file *os.File - var err error - var blobSize int64 - var blobTime time.Time var reader io.Reader + var blobTime time.Time + var blob tarballBlob if filename == "-" { - blobSize = int64(len(r.stdin)) - blobTime = time.Now() reader = bytes.NewReader(r.stdin) + blobTime = time.Now() + blob = tarballBlob{ + contents: r.stdin, + size: int64(len(r.stdin)), + } } else { - file, err = os.Open(filename) + file, err := os.Open(filename) if err != nil { - return nil, fmt.Errorf("error opening %q for reading: %w", filename, err) + return nil, err } defer file.Close() reader = file @@ -72,8 +75,11 @@ func (r *tarballReference) NewImageSource(ctx context.Context, sys *types.System if err != nil { return nil, fmt.Errorf("error reading size of %q: %w", filename, err) } - blobSize = fileinfo.Size() blobTime = fileinfo.ModTime() + blob = tarballBlob{ + filename: filename, + size: fileinfo.Size(), + } } // Default to assuming the layer is compressed. @@ -96,8 +102,7 @@ func (r *tarballReference) NewImageSource(ctx context.Context, sys *types.System uncompressed = nil } // TODO: This can take quite some time, and should ideally be cancellable using ctx.Done(). - n, err := io.Copy(io.Discard, reader) - if err != nil { + if _, err := io.Copy(io.Discard, reader); err != nil { return nil, fmt.Errorf("error reading %q: %v", filename, err) } if uncompressed != nil { @@ -105,38 +110,26 @@ func (r *tarballReference) NewImageSource(ctx context.Context, sys *types.System } // Grab our uncompressed and possibly-compressed digests and sizes. - filenames = append(filenames, filename) - diffIDs = append(diffIDs, diffIDdigester.Digest()) - diffSizes = append(diffSizes, n) - blobIDs = append(blobIDs, blobIDdigester.Digest()) - blobSizes = append(blobSizes, blobSize) - blobTimes = append(blobTimes, blobTime) - blobTypes = append(blobTypes, layerType) - } + diffID := diffIDdigester.Digest() + blobID := blobIDdigester.Digest() + diffIDs = append(diffIDs, diffID) + blobs[blobID] = blob - // Build the rootfs and history for the configuration blob. - rootfs := imgspecv1.RootFS{ - Type: "layers", - DiffIDs: diffIDs, - } - created := time.Time{} - history := []imgspecv1.History{} - // Pick up the layer comment from the configuration's history list, if one is set. - comment := "imported from tarball" - if len(r.config.History) > 0 && r.config.History[0].Comment != "" { - comment = r.config.History[0].Comment - } - for i := range diffIDs { - createdBy := fmt.Sprintf("/bin/sh -c #(nop) ADD file:%s in %c", diffIDs[i].Hex(), os.PathSeparator) history = append(history, imgspecv1.History{ - Created: &blobTimes[i], - CreatedBy: createdBy, + Created: &blobTime, + CreatedBy: fmt.Sprintf("/bin/sh -c #(nop) ADD file:%s in %c", diffID.Hex(), os.PathSeparator), Comment: comment, }) // Use the mtime of the most recently modified file as the image's creation time. - if created.Before(blobTimes[i]) { - created = blobTimes[i] + if created.Before(blobTime) { + created = blobTime } + + layerDescriptors = append(layerDescriptors, imgspecv1.Descriptor{ + Digest: blobID, + Size: blob.size, + MediaType: layerType, + }) } // Pick up other defaults from the config in the reference. 
@@ -150,7 +143,10 @@ func (r *tarballReference) NewImageSource(ctx context.Context, sys *types.System if config.OS == "" { config.OS = runtime.GOOS } - config.RootFS = rootfs + config.RootFS = imgspecv1.RootFS{ + Type: "layers", + DiffIDs: diffIDs, + } config.History = history // Encode and digest the image configuration blob. @@ -159,24 +155,19 @@ func (r *tarballReference) NewImageSource(ctx context.Context, sys *types.System return nil, fmt.Errorf("error generating configuration blob for %q: %v", strings.Join(r.filenames, separator), err) } configID := digest.Canonical.FromBytes(configBytes) - configSize := int64(len(configBytes)) - - // Populate a manifest with the configuration blob and the file as the single layer. - layerDescriptors := []imgspecv1.Descriptor{} - for i := range blobIDs { - layerDescriptors = append(layerDescriptors, imgspecv1.Descriptor{ - Digest: blobIDs[i], - Size: blobSizes[i], - MediaType: blobTypes[i], - }) + blobs[configID] = tarballBlob{ + contents: configBytes, + size: int64(len(configBytes)), } + + // Populate a manifest with the configuration blob and the layers. manifest := imgspecv1.Manifest{ Versioned: imgspecs.Versioned{ SchemaVersion: 2, }, Config: imgspecv1.Descriptor{ Digest: configID, - Size: configSize, + Size: int64(len(configBytes)), MediaType: imgspecv1.MediaTypeImageConfig, }, Layers: layerDescriptors, @@ -196,17 +187,9 @@ func (r *tarballReference) NewImageSource(ctx context.Context, sys *types.System }), NoGetBlobAtInitialize: stubs.NoGetBlobAt(r), - reference: *r, - filenames: filenames, - diffIDs: diffIDs, - diffSizes: diffSizes, - blobIDs: blobIDs, - blobSizes: blobSizes, - blobTypes: blobTypes, - config: configBytes, - configID: configID, - configSize: configSize, - manifest: manifestBytes, + reference: *r, + blobs: blobs, + manifest: manifestBytes, } src.Compat = impl.AddCompat(src) @@ -221,24 +204,18 @@ func (is *tarballImageSource) Close() error { // The Digest field in BlobInfo is guaranteed to be provided, Size may be -1 and MediaType may be optionally provided. // May update BlobInfoCache, preferably after it knows for certain that a blob truly exists at a specific location. func (is *tarballImageSource) GetBlob(ctx context.Context, blobinfo types.BlobInfo, cache types.BlobInfoCache) (io.ReadCloser, int64, error) { - // We should only be asked about things in the manifest. Maybe the configuration blob. - if blobinfo.Digest == is.configID { - return io.NopCloser(bytes.NewBuffer(is.config)), is.configSize, nil - } - // Maybe one of the layer blobs. - i := slices.Index(is.blobIDs, blobinfo.Digest) - if i == -1 { + blob, ok := is.blobs[blobinfo.Digest] + if !ok { return nil, -1, fmt.Errorf("no blob with digest %q found", blobinfo.Digest.String()) } - // We want to read that layer: open the file or memory block and hand it back. - if is.filenames[i] == "-" { - return io.NopCloser(bytes.NewBuffer(is.reference.stdin)), int64(len(is.reference.stdin)), nil + if blob.contents != nil { + return io.NopCloser(bytes.NewReader(blob.contents)), int64(len(blob.contents)), nil } - reader, err := os.Open(is.filenames[i]) + reader, err := os.Open(blob.filename) if err != nil { - return nil, -1, fmt.Errorf("error opening %q: %v", is.filenames[i], err) + return nil, -1, err } - return reader, is.blobSizes[i], nil + return reader, blob.size, nil } // GetManifest returns the image's manifest along with its MIME type (which may be empty when it can't be determined but the manifest is available). 
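A reduced sketch of the pattern the tarball refactor above converges on: one digest-keyed map whose entries either carry bytes in memory or name a file to open on demand, so GetBlob becomes a single lookup instead of parallel-slice bookkeeping (the types and names here are illustrative, not the package's own):

package main

import (
	"bytes"
	"fmt"
	"io"
	"os"
)

// blob holds either in-memory contents or a filename to open on demand.
type blob struct {
	contents []byte // nil means read from filename
	filename string
	size     int64
}

func openBlob(blobs map[string]blob, digest string) (io.ReadCloser, int64, error) {
	b, ok := blobs[digest]
	if !ok {
		return nil, -1, fmt.Errorf("no blob with digest %q found", digest)
	}
	if b.contents != nil {
		// bytes.NewReader rather than bytes.NewBuffer: read-only, no draining semantics.
		return io.NopCloser(bytes.NewReader(b.contents)), int64(len(b.contents)), nil
	}
	f, err := os.Open(b.filename)
	if err != nil {
		return nil, -1, err
	}
	return f, b.size, nil
}

func main() {
	blobs := map[string]blob{"sha256:abc": {contents: []byte("config bytes")}}
	rc, n, err := openBlob(blobs, "sha256:abc")
	if err != nil {
		panic(err)
	}
	defer rc.Close()
	data, _ := io.ReadAll(rc)
	fmt.Println(n, string(data))
}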
diff --git a/vendor/github.com/containers/image/v5/types/types.go b/vendor/github.com/containers/image/v5/types/types.go index 6ea414b86..33adb5f1d 100644 --- a/vendor/github.com/containers/image/v5/types/types.go +++ b/vendor/github.com/containers/image/v5/types/types.go @@ -585,9 +585,9 @@ type SystemContext struct { // resolving to Docker Hub in the Docker-compatible REST API of Podman; it should never be used outside this // specific context. PodmanOnlyShortNamesIgnoreRegistriesConfAndForceDockerHub bool - // If not "", overrides the default path for the authentication file, but only new format files + // If not "", overrides the default path for the registry authentication file, but only new format files AuthFilePath string - // if not "", overrides the default path for the authentication file, but with the legacy format; + // if not "", overrides the default path for the registry authentication file, but with the legacy format; // the code currently will by default look for legacy format files like .dockercfg in the $HOME dir; // but in addition to the home dir, openshift may mount .dockercfg files (via secret mount) // in locations other than the home dir; openshift components should then set this field in those cases; diff --git a/vendor/github.com/containers/image/v5/version/version.go b/vendor/github.com/containers/image/v5/version/version.go index 16a6d5816..afb35157b 100644 --- a/vendor/github.com/containers/image/v5/version/version.go +++ b/vendor/github.com/containers/image/v5/version/version.go @@ -6,7 +6,7 @@ const ( // VersionMajor is for an API incompatible changes VersionMajor = 5 // VersionMinor is for functionality in a backwards-compatible manner - VersionMinor = 25 + VersionMinor = 26 // VersionPatch is for backwards-compatible bug fixes VersionPatch = 0 diff --git a/vendor/github.com/containers/storage/.cirrus.yml b/vendor/github.com/containers/storage/.cirrus.yml index 4e3ba7317..8ef38e2cd 100644 --- a/vendor/github.com/containers/storage/.cirrus.yml +++ b/vendor/github.com/containers/storage/.cirrus.yml @@ -17,13 +17,13 @@ env: #### #### Cache-image names to test with (double-quotes around names are critical) ### - FEDORA_NAME: "fedora-37" - DEBIAN_NAME: "debian-12" + FEDORA_NAME: "fedora-38" + DEBIAN_NAME: "debian-13" # GCE project where images live IMAGE_PROJECT: "libpod-218412" # VM Image built in containers/automation_images - IMAGE_SUFFIX: "c20230405t152256z-f37f36d12" + IMAGE_SUFFIX: "c20230614t132754z-f38f37d13" FEDORA_CACHE_IMAGE_NAME: "fedora-${IMAGE_SUFFIX}" DEBIAN_CACHE_IMAGE_NAME: "debian-${IMAGE_SUFFIX}" @@ -56,7 +56,6 @@ gce_instance: linux_testing: &linux_testing depends_on: - lint - only_if: $CIRRUS_CHANGE_TITLE !=~ '.*CI:DOCS.*' gce_instance: # Only need to specify differences from defaults (above) image_name: "${VM_IMAGE}" @@ -127,10 +126,12 @@ lint_task: fingerprint_script: cat go.sum folder: $GOPATH/pkg/mod build_script: | - echo "deb http://deb.debian.org/debian stretch-backports main" > /etc/apt/sources.list.d/backports.list apt-get update apt-get install -y libbtrfs-dev libdevmapper-dev - test_script: make TAGS=regex_precompile local-validate && make lint && make clean + test_script: | + make TAGS=regex_precompile local-validate + make lint + make clean # Update metadata on VM images referenced by this repository state @@ -168,7 +169,7 @@ vendor_task: cross_task: container: - image: golang:1.17 + image: golang:1.19 build_script: make cross @@ -182,6 +183,6 @@ success_task: - vendor - cross container: - image: golang:1.17 + image: golang:1.19 
clone_script: 'mkdir -p "$CIRRUS_WORKING_DIR"' # Source code not needed script: /bin/true diff --git a/vendor/github.com/containers/storage/.golangci.yml b/vendor/github.com/containers/storage/.golangci.yml index 755aa35c0..20968466c 100644 --- a/vendor/github.com/containers/storage/.golangci.yml +++ b/vendor/github.com/containers/storage/.golangci.yml @@ -4,68 +4,8 @@ run: deadline: 5m skip-dirs-use-default: true linters: - enable-all: true + enable: + - gofumpt disable: - - cyclop - - deadcode - - dogsled - - dupl - errcheck - - errname - - errorlint - - exhaustive - - exhaustivestruct - - exhaustruct - - forbidigo - - forcetypeassert - - funlen - - gci - - gochecknoglobals - - gochecknoinits - - gocognit - - gocritic - - gocyclo - - godot - - godox - - goerr113 - - gofumpt - - golint - - gomnd - - gosec - - gosimple - - govet - - ifshort - - ineffassign - - interfacer - - interfacebloat - - ireturn - - lll - - maintidx - - maligned - - misspell - - musttag - - nakedret - - nestif - - nlreturn - - nolintlint - - nonamedreturns - - nosnakecase - - paralleltest - - prealloc - - predeclared - - rowserrcheck - - scopelint - staticcheck - - structcheck - - stylecheck - - tagliatelle - - testpackage - - thelper - - unconvert - - unparam - - varcheck - - varnamelen - - wastedassign - - whitespace - - wrapcheck - - wsl diff --git a/vendor/github.com/containers/storage/Makefile b/vendor/github.com/containers/storage/Makefile index 52266da0f..6cb354c2c 100644 --- a/vendor/github.com/containers/storage/Makefile +++ b/vendor/github.com/containers/storage/Makefile @@ -1,13 +1,18 @@ -export GO111MODULE=off -export GOPROXY=https://proxy.golang.org - .PHONY: \ all \ + binary \ clean \ + codespell \ + containers-storage \ + cross \ default \ docs \ + gccgo \ help \ + install \ + install.docs \ install.tools \ + lint \ local-binary \ local-cross \ local-gccgo \ @@ -15,33 +20,25 @@ export GOPROXY=https://proxy.golang.org local-test-integration \ local-test-unit \ local-validate \ - lint \ - vendor + test-integration \ + test-unit \ + validate \ + vendor \ + vendor-in-container -PACKAGE := github.com/containers/storage -GIT_BRANCH := $(shell git rev-parse --abbrev-ref HEAD 2>/dev/null) -GIT_BRANCH_CLEAN := $(shell echo $(GIT_BRANCH) | sed -e "s/[^[:alnum:]]/-/g") -EPOCH_TEST_COMMIT := 0418ebf59f9e1f564831c0ba9378b7f8e40a1c73 NATIVETAGS := AUTOTAGS := $(shell ./hack/btrfs_tag.sh) $(shell ./hack/libdm_tag.sh) $(shell ./hack/libsubid_tag.sh) BUILDFLAGS := -tags "$(AUTOTAGS) $(TAGS)" $(FLAGS) GO ?= go TESTFLAGS := $(shell $(GO) test -race $(BUILDFLAGS) ./pkg/stringutils 2>&1 > /dev/null && echo -race) -# Go module support: set `-mod=vendor` to use the vendored sources -ifeq ($(shell $(GO) help mod >/dev/null 2>&1 && echo true), true) - GO:=GO111MODULE=on $(GO) - MOD_VENDOR=-mod=vendor -endif - default all: local-binary docs local-validate local-cross ## validate all checks, build and cross-build\nbinaries and docs clean: ## remove all built files $(RM) -f containers-storage containers-storage.* docs/*.1 docs/*.5 -sources := $(wildcard *.go cmd/containers-storage/*.go drivers/*.go drivers/*/*.go internal/*/*.go pkg/*/*.go pkg/*/*/*.go types/*.go) -containers-storage: $(sources) ## build using gc on the host - $(GO) build $(MOD_VENDOR) -compiler gc $(BUILDFLAGS) ./cmd/containers-storage +containers-storage: ## build using gc on the host + $(GO) build -compiler gc $(BUILDFLAGS) ./cmd/containers-storage codespell: codespell -S 
Makefile,build,buildah,buildah.spec,imgtype,copy,AUTHORS,bin,vendor,.git,go.sum,CHANGELOG.md,changelog.txt,seccomp.json,.cirrus.yml,"*.xz,*.gz,*.tar,*.tgz,*ico,*.png,*.1,*.5,*.orig,*.rej" -L worl,flate,uint,iff,od,ERRO -w @@ -49,15 +46,15 @@ codespell: binary local-binary: containers-storage local-gccgo gccgo: ## build using gccgo on the host - GCCGO=$(PWD)/hack/gccgo-wrapper.sh $(GO) build $(MOD_VENDOR) -compiler gccgo $(BUILDFLAGS) -o containers-storage.gccgo ./cmd/containers-storage + GCCGO=$(PWD)/hack/gccgo-wrapper.sh $(GO) build -compiler gccgo $(BUILDFLAGS) -o containers-storage.gccgo ./cmd/containers-storage local-cross cross: ## cross build the binaries for arm, darwin, and freebsd @for target in linux/amd64 linux/386 linux/arm linux/arm64 linux/ppc64 linux/ppc64le linux/s390x linux/mips linux/mipsle linux/mips64 linux/mips64le darwin/amd64 windows/amd64 freebsd/amd64 freebsd/arm64 ; do \ os=`echo $${target} | cut -f1 -d/` ; \ arch=`echo $${target} | cut -f2 -d/` ; \ suffix=$${os}.$${arch} ; \ - echo env CGO_ENABLED=0 GOOS=$${os} GOARCH=$${arch} $(GO) build $(MOD_VENDOR) -compiler gc -tags \"$(NATIVETAGS) $(TAGS)\" $(FLAGS) -o containers-storage.$${suffix} ./cmd/containers-storage ; \ - env CGO_ENABLED=0 GOOS=$${os} GOARCH=$${arch} $(GO) build $(MOD_VENDOR) -compiler gc -tags "$(NATIVETAGS) $(TAGS)" $(FLAGS) -o containers-storage.$${suffix} ./cmd/containers-storage || exit 1 ; \ + echo env CGO_ENABLED=0 GOOS=$${os} GOARCH=$${arch} $(GO) build -compiler gc -tags \"$(NATIVETAGS) $(TAGS)\" $(FLAGS) -o containers-storage.$${suffix} ./cmd/containers-storage ; \ + env CGO_ENABLED=0 GOOS=$${os} GOARCH=$${arch} $(GO) build -compiler gc -tags "$(NATIVETAGS) $(TAGS)" $(FLAGS) -o containers-storage.$${suffix} ./cmd/containers-storage || exit 1 ; \ done docs: install.tools ## build the docs on the host @@ -66,21 +63,17 @@ docs: install.tools ## build the docs on the host local-test: local-binary local-test-unit local-test-integration ## build the binaries and run the tests local-test-unit test-unit: local-binary ## run the unit tests on the host (requires\nsuperuser privileges) - @$(GO) test $(MOD_VENDOR) $(BUILDFLAGS) $(TESTFLAGS) $(shell $(GO) list ./... | grep -v ^$(PACKAGE)/vendor) + @$(GO) test -count 1 $(BUILDFLAGS) $(TESTFLAGS) ./... 
local-test-integration test-integration: local-binary ## run the integration tests on the host (requires\nsuperuser privileges) @cd tests; ./test_runner.bash -local-validate validate: install.tools ## validate DCO and gofmt on the host +local-validate validate: install.tools ## validate DCO on the host @./hack/git-validation.sh - @./hack/gofmt.sh install.tools: $(MAKE) -C tests/tools -$(FFJSON): - $(MAKE) -C tests/tools - install.docs: docs $(MAKE) -C docs install diff --git a/vendor/github.com/containers/storage/VERSION b/vendor/github.com/containers/storage/VERSION index aa3ed3a5e..21998d3c2 100644 --- a/vendor/github.com/containers/storage/VERSION +++ b/vendor/github.com/containers/storage/VERSION @@ -1 +1 @@ -1.46.1 +1.47.0 diff --git a/vendor/github.com/containers/storage/check.go b/vendor/github.com/containers/storage/check.go new file mode 100644 index 000000000..e58084fc7 --- /dev/null +++ b/vendor/github.com/containers/storage/check.go @@ -0,0 +1,1153 @@ +package storage + +import ( + "archive/tar" + "errors" + "fmt" + "io" + "os" + "path" + "path/filepath" + "sort" + "strings" + "sync" + "time" + + drivers "github.com/containers/storage/drivers" + "github.com/containers/storage/pkg/archive" + "github.com/containers/storage/pkg/idtools" + "github.com/containers/storage/pkg/ioutils" + "github.com/containers/storage/types" + "github.com/sirupsen/logrus" +) + +var ( + // ErrLayerUnaccounted describes a layer that is present in the lower-level storage driver, + // but which is not known to or managed by the higher-level driver-agnostic logic. + ErrLayerUnaccounted = types.ErrLayerUnaccounted + // ErrLayerUnreferenced describes a layer which is not used by any image or container. + ErrLayerUnreferenced = types.ErrLayerUnreferenced + // ErrLayerIncorrectContentDigest describes a layer for which the contents of one or more + // files which were added in the layer appear to have changed. It may instead look like an + // unnamed "file integrity checksum failed" error. + ErrLayerIncorrectContentDigest = types.ErrLayerIncorrectContentDigest + // ErrLayerIncorrectContentSize describes a layer for which regenerating the diff that was + // used to populate the layer produced a diff of a different size. We check the digest + // first, so it's highly unlikely you'll ever see this error. + ErrLayerIncorrectContentSize = types.ErrLayerIncorrectContentSize + // ErrLayerContentModified describes a layer which contains contents which should not be + // there, or for which ownership/permissions/dates have been changed. + ErrLayerContentModified = types.ErrLayerContentModified + // ErrLayerDataMissing describes a layer which is missing a big data item. + ErrLayerDataMissing = types.ErrLayerDataMissing + // ErrLayerMissing describes a layer which is the missing parent of a layer. + ErrLayerMissing = types.ErrLayerMissing + // ErrImageLayerMissing describes an image which claims to have a layer that we don't know + // about. + ErrImageLayerMissing = types.ErrImageLayerMissing + // ErrImageDataMissing describes an image which is missing a big data item. + ErrImageDataMissing = types.ErrImageDataMissing + // ErrImageDataIncorrectSize describes an image which has a big data item which looks like + // its size has changed, likely because it's been modified somehow. + ErrImageDataIncorrectSize = types.ErrImageDataIncorrectSize + // ErrContainerImageMissing describes a container which claims to be based on an image that + // we don't know about. 
+ ErrContainerImageMissing = types.ErrContainerImageMissing + // ErrContainerDataMissing describes a container which is missing a big data item. + ErrContainerDataMissing = types.ErrContainerDataMissing + // ErrContainerDataIncorrectSize describes a container which has a big data item which looks + // like its size has changed, likely because it's been modified somehow. + ErrContainerDataIncorrectSize = types.ErrContainerDataIncorrectSize +) + +const ( + defaultMaximumUnreferencedLayerAge = 24 * time.Hour +) + +// CheckOptions is the set of options for Check(), specifying which tests to perform. +type CheckOptions struct { + LayerUnreferencedMaximumAge *time.Duration // maximum allowed age of unreferenced layers + LayerDigests bool // check that contents of image layer diffs can still be reconstructed + LayerMountable bool // check that layers are mountable + LayerContents bool // check that contents of image layers match their diffs, with no unexpected changes, requires LayerMountable + LayerData bool // check that associated "big" data items are present and can be read + ImageData bool // check that associated "big" data items are present, can be read, and match the recorded size + ContainerData bool // check that associated "big" data items are present and can be read +} + +// checkIgnore is used to tell functions that compare the contents of a mounted +// layer to the contents that we'd expect it to have to ignore certain +// discrepancies +type checkIgnore struct { + ownership, timestamps, permissions bool +} + +// CheckMost returns a CheckOptions with mostly just "quick" checks enabled. +func CheckMost() *CheckOptions { + return &CheckOptions{ + LayerDigests: true, + LayerMountable: true, + LayerContents: false, + LayerData: true, + ImageData: true, + ContainerData: true, + } +} + +// CheckEverything returns a CheckOptions with every check enabled. +func CheckEverything() *CheckOptions { + return &CheckOptions{ + LayerDigests: true, + LayerMountable: true, + LayerContents: true, + LayerData: true, + ImageData: true, + ContainerData: true, + } +} + +// CheckReport is a list of detected problems. +type CheckReport struct { + Layers map[string][]error // damaged read-write layers + ROLayers map[string][]error // damaged read-only layers + layerParentsByLayerID map[string]string + layerOrder map[string]int + Images map[string][]error // damaged read-write images (including those with damaged layers) + ROImages map[string][]error // damaged read-only images (including those with damaged layers) + Containers map[string][]error // damaged containers (including those based on damaged images) +} + +// RepairOptions is the set of options for Repair(). +type RepairOptions struct { + RemoveContainers bool // Remove damaged containers +} + +// RepairEverything returns a RepairOptions with every optional remediation +// enabled. +func RepairEverything() *RepairOptions { + return &RepairOptions{ + RemoveContainers: true, + } +} + +// Check returns a list of problems with what's in the store, as a whole. It can be very expensive +// to call. 
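As a usage sketch (not part of the diff): a caller might drive the new Check()/Repair() surface as below. This assumes the exported Store interface picks up the Check and Repair methods defined in this file; the paths and driver name are placeholders, not required defaults.

```go
// Illustrative only: exercises CheckMost/CheckEverything, Check, and Repair
// as defined in check.go. Store paths and the driver name are examples.
package main

import (
	"fmt"

	"github.com/containers/storage"
	"github.com/containers/storage/types"
)

func main() {
	store, err := storage.GetStore(types.StoreOptions{
		RunRoot:         "/run/containers/storage",     // example path
		GraphRoot:       "/var/lib/containers/storage", // example path
		GraphDriverName: "overlay",
	})
	if err != nil {
		panic(err)
	}
	defer store.Shutdown(false)

	// CheckMost() skips the expensive mounted-contents comparison;
	// CheckEverything() turns it on.
	report, err := store.Check(storage.CheckMost())
	if err != nil {
		panic(err)
	}
	for id, errs := range report.Layers {
		for _, e := range errs {
			fmt.Printf("damaged layer %s: %v\n", id, e)
		}
	}
	// Repair deletes damaged items; RepairEverything() also removes
	// damaged containers, not just images and layers.
	for _, e := range store.Repair(report, storage.RepairEverything()) {
		fmt.Println("repair:", e)
	}
}
```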
+func (s *store) Check(options *CheckOptions) (CheckReport, error) { + var ignore checkIgnore + for _, o := range s.graphOptions { + if strings.Contains(o, "ignore_chown_errors=true") { + ignore.ownership = true + } + if strings.HasPrefix(o, "force_mask=") { + ignore.permissions = true + } + } + for o := range s.pullOptions { + if strings.Contains(o, "use_hard_links") { + if s.pullOptions[o] == "true" { + ignore.timestamps = true + } + } + } + + if options == nil { + options = CheckMost() + } + + report := CheckReport{ + Layers: make(map[string][]error), + ROLayers: make(map[string][]error), + layerParentsByLayerID: make(map[string]string), // layers ID -> their parent's ID, if there is one + layerOrder: make(map[string]int), // layers ID -> order for removal, if we needed to remove them all + Images: make(map[string][]error), + ROImages: make(map[string][]error), + Containers: make(map[string][]error), + } + + // This map will track known layer IDs. If we have multiple stores, read-only ones can + // contain copies of layers that are in the read-write store, but we'll only ever be + // mounting or extracting contents from the read-write versions, since we always search it + // first. The boolean will track if the layer is referenced by at least one image or + // container. + referencedLayers := make(map[string]bool) + referencedROLayers := make(map[string]bool) + + // This map caches the headers for items included in layer diffs. + diffHeadersByLayer := make(map[string][]*tar.Header) + var diffHeadersByLayerMutex sync.Mutex + + // Walk the list of layer stores, looking at each layer that we didn't see in a + // previously-visited store. + if _, _, err := readOrWriteAllLayerStores(s, func(store roLayerStore) (struct{}, bool, error) { + layers, err := store.Layers() + if err != nil { + return struct{}{}, true, err + } + isReadWrite := roLayerStoreIsReallyReadWrite(store) + readWriteDesc := "" + if !isReadWrite { + readWriteDesc = "read-only " + } + // Examine each layer in turn. + for i := range layers { + layer := layers[i] + id := layer.ID + // If we've already seen a layer with this ID, no need to process it again. + if _, checked := referencedLayers[id]; checked { + continue + } + if _, checked := referencedROLayers[id]; checked { + continue + } + // Note the parent of this layer, and add it to the map of known layers so + // that we know that we've visited it, but we haven't confirmed that it's + // used by anything. + report.layerParentsByLayerID[id] = layer.Parent + if isReadWrite { + referencedLayers[id] = false + } else { + referencedROLayers[id] = false + } + logrus.Debugf("checking %slayer %s", readWriteDesc, id) + // Check that all of the big data items are present and can be read. We + // have no digest or size information to compare the contents to (grumble), + // so we can't verify that the contents haven't been changed since they + // were stored. 
+ if options.LayerData { + for _, name := range layer.BigDataNames { + func() { + rc, err := store.BigData(id, name) + if err != nil { + if errors.Is(err, os.ErrNotExist) { + err := fmt.Errorf("%slayer %s: data item %q: %w", readWriteDesc, id, name, ErrLayerDataMissing) + if isReadWrite { + report.Layers[id] = append(report.Layers[id], err) + } else { + report.ROLayers[id] = append(report.ROLayers[id], err) + } + return + } + err = fmt.Errorf("%slayer %s: data item %q: %w", readWriteDesc, id, name, err) + if isReadWrite { + report.Layers[id] = append(report.Layers[id], err) + } else { + report.ROLayers[id] = append(report.ROLayers[id], err) + } + return + } + defer rc.Close() + if _, err = io.Copy(io.Discard, rc); err != nil { + err = fmt.Errorf("%slayer %s: data item %q: %w", readWriteDesc, id, name, err) + if isReadWrite { + report.Layers[id] = append(report.Layers[id], err) + } else { + report.ROLayers[id] = append(report.ROLayers[id], err) + } + return + } + }() + } + } + // Check that the content we get back when extracting the layer's contents + // match the recorded digest and size. A layer for which they're not given + // isn't a part of an image, and is likely the read-write layer for a + // container, and we can't vouch for the integrity of its contents. + // For each layer with known contents, record the headers for the layer's + // diff, which we can use to reconstruct the expected contents for the tree + // we see when the layer is mounted. + if options.LayerDigests && layer.UncompressedDigest != "" { + func() { + expectedDigest := layer.UncompressedDigest + // Double-check that the digest isn't invalid somehow. + if err := layer.UncompressedDigest.Validate(); err != nil { + err := fmt.Errorf("%slayer %s: %w", readWriteDesc, id, err) + if isReadWrite { + report.Layers[id] = append(report.Layers[id], err) + } else { + report.ROLayers[id] = append(report.ROLayers[id], err) + } + return + } + // Extract the diff. + uncompressed := archive.Uncompressed + diffOptions := DiffOptions{ + Compression: &uncompressed, + } + diff, err := store.Diff("", id, &diffOptions) + if err != nil { + err := fmt.Errorf("%slayer %s: %w", readWriteDesc, id, err) + if isReadWrite { + report.Layers[id] = append(report.Layers[id], err) + } else { + report.ROLayers[id] = append(report.ROLayers[id], err) + } + return + } + // Digest and count the length of the diff. + digester := expectedDigest.Algorithm().Digester() + counter := ioutils.NewWriteCounter(digester.Hash()) + reader := io.TeeReader(diff, counter) + var wg sync.WaitGroup + var archiveErr error + wg.Add(1) + go func(layerID string, diffReader io.Reader) { + // Read the diff, one item at a time. 
+ tr := tar.NewReader(diffReader) + hdr, err := tr.Next() + for err == nil { + diffHeadersByLayerMutex.Lock() + diffHeadersByLayer[layerID] = append(diffHeadersByLayer[layerID], hdr) + diffHeadersByLayerMutex.Unlock() + hdr, err = tr.Next() + } + if !errors.Is(err, io.EOF) { + archiveErr = err + } + // consume any trailer after the EOF marker + io.Copy(io.Discard, diffReader) + wg.Done() + }(id, reader) + wg.Wait() + diff.Close() + if archiveErr != nil { + // Reading the diff didn't end as expected + diffHeadersByLayerMutex.Lock() + delete(diffHeadersByLayer, id) + diffHeadersByLayerMutex.Unlock() + archiveErr = fmt.Errorf("%slayer %s: %w", readWriteDesc, id, archiveErr) + if isReadWrite { + report.Layers[id] = append(report.Layers[id], archiveErr) + } else { + report.ROLayers[id] = append(report.ROLayers[id], archiveErr) + } + return + } + if digester.Digest() != layer.UncompressedDigest { + // The diff digest didn't match. + diffHeadersByLayerMutex.Lock() + delete(diffHeadersByLayer, id) + diffHeadersByLayerMutex.Unlock() + err := fmt.Errorf("%slayer %s: %w", readWriteDesc, id, ErrLayerIncorrectContentDigest) + if isReadWrite { + report.Layers[id] = append(report.Layers[id], err) + } else { + report.ROLayers[id] = append(report.ROLayers[id], err) + } + } + if layer.UncompressedSize != -1 && counter.Count != layer.UncompressedSize { + // We expected the diff to have a specific size, and + // it didn't match. + diffHeadersByLayerMutex.Lock() + delete(diffHeadersByLayer, id) + diffHeadersByLayerMutex.Unlock() + err := fmt.Errorf("%slayer %s: read %d bytes instead of %d bytes: %w", readWriteDesc, id, counter.Count, layer.UncompressedSize, ErrLayerIncorrectContentSize) + if isReadWrite { + report.Layers[id] = append(report.Layers[id], err) + } else { + report.ROLayers[id] = append(report.ROLayers[id], err) + } + } + }() + } + } + // At this point we're out of things that we can be sure will work in read-only + // stores, so skip the rest for any stores that aren't also read-write stores. + if !isReadWrite { + return struct{}{}, false, nil + } + // Content and mount checks are also things that we can only be sure will work in + // read-write stores. + for i := range layers { + layer := layers[i] + id := layer.ID + // Compare to what we see when we mount the layer and walk the tree, and + // flag cases where content is in the layer that shouldn't be there. The + // tar-split implementation of Diff() won't catch this problem by itself. + if options.LayerMountable { + func() { + // Mount the layer. + mountPoint, err := s.graphDriver.Get(id, drivers.MountOpts{MountLabel: layer.MountLabel}) + if err != nil { + err := fmt.Errorf("%slayer %s: %w", readWriteDesc, id, err) + if isReadWrite { + report.Layers[id] = append(report.Layers[id], err) + } else { + report.ROLayers[id] = append(report.ROLayers[id], err) + } + return + } + // Unmount the layer when we're done in here. + defer func() { + if err := s.graphDriver.Put(id); err != nil { + err := fmt.Errorf("%slayer %s: %w", readWriteDesc, id, err) + if isReadWrite { + report.Layers[id] = append(report.Layers[id], err) + } else { + report.ROLayers[id] = append(report.ROLayers[id], err) + } + return + } + }() + // If we're not looking at layer contents, or we didn't + // look at the diff for this layer, we're done here. + if !options.LayerDigests || layer.UncompressedDigest == "" || !options.LayerContents { + return + } + // Build a list of all of the changes in all of the layers + // that make up the tree we're looking at. 
+ diffHeaderSet := [][]*tar.Header{} + // If we don't know _all_ of the changes that produced this + // layer, it's not part of an image, so we're done here. + for layerID := id; layerID != ""; layerID = report.layerParentsByLayerID[layerID] { + diffHeadersByLayerMutex.Lock() + layerChanges, haveChanges := diffHeadersByLayer[layerID] + diffHeadersByLayerMutex.Unlock() + if !haveChanges { + return + } + // The diff headers for this layer go _before_ those of + // layers that inherited some of its contents. + diffHeaderSet = append([][]*tar.Header{layerChanges}, diffHeaderSet...) + } + expectedCheckDirectory := newCheckDirectoryDefaults() + for _, diffHeaders := range diffHeaderSet { + expectedCheckDirectory.headers(diffHeaders) + } + // Scan the directory tree under the mount point. + var idmap *idtools.IDMappings + if !s.canUseShifting(layer.UIDMap, layer.GIDMap) { + // we would have had to chown() layer contents to match ID maps + idmap = idtools.NewIDMappingsFromMaps(layer.UIDMap, layer.GIDMap) + } + actualCheckDirectory, err := newCheckDirectoryFromDirectory(mountPoint) + if err != nil { + err := fmt.Errorf("scanning contents of %slayer %s: %w", readWriteDesc, id, err) + if isReadWrite { + report.Layers[id] = append(report.Layers[id], err) + } else { + report.ROLayers[id] = append(report.ROLayers[id], err) + } + return + } + // Every departure from our expectations is an error. + diffs := compareCheckDirectory(expectedCheckDirectory, actualCheckDirectory, idmap, ignore) + for _, diff := range diffs { + err := fmt.Errorf("%slayer %s: %s, %w", readWriteDesc, id, diff, ErrLayerContentModified) + if isReadWrite { + report.Layers[id] = append(report.Layers[id], err) + } else { + report.ROLayers[id] = append(report.ROLayers[id], err) + } + } + }() + } + } + // Check that we don't have any dangling parent layer references. + for id, parent := range report.layerParentsByLayerID { + // If this layer doesn't have a parent, no problem. + if parent == "" { + continue + } + // If we've already seen a layer with this parent ID, skip it. + if _, checked := referencedLayers[parent]; checked { + continue + } + if _, checked := referencedROLayers[parent]; checked { + continue + } + // We haven't seen a layer with the ID that this layer's record + // says is its parent's ID. + err := fmt.Errorf("%slayer %s: %w", readWriteDesc, parent, ErrLayerMissing) + report.Layers[id] = append(report.Layers[id], err) + } + return struct{}{}, false, nil + }); err != nil { + return CheckReport{}, err + } + + // This map will track examined images. If we have multiple stores, read-only ones can + // contain copies of images that are also in the read-write store, or the read-write store + // may contain a duplicate entry that refers to layers in the read-only stores, but when + // trying to export them, we only look at the first copy of the image. + examinedImages := make(map[string]struct{}) + + // Walk the list of image stores, looking at each image that we didn't see in a + // previously-visited store. + if _, _, err := readAllImageStores(s, func(store roImageStore) (struct{}, bool, error) { + images, err := store.Images() + if err != nil { + return struct{}{}, true, err + } + isReadWrite := roImageStoreIsReallyReadWrite(store) + readWriteDesc := "" + if !isReadWrite { + readWriteDesc = "read-only " + } + // Examine each image in turn. + for i := range images { + image := images[i] + id := image.ID + // If we've already seen an image with this ID, skip it. 
+ if _, checked := examinedImages[id]; checked { + continue + } + examinedImages[id] = struct{}{} + logrus.Debugf("checking %simage %s", readWriteDesc, id) + if options.ImageData { + // Check that all of the big data items are present and reading them + // back gives us the right amount of data. Even though we record + // digests that can be used to look them up, we don't know how they + // were calculated (they're only used as lookup keys), so do not try + // to check them. + for _, key := range image.BigDataNames { + func() { + data, err := store.BigData(id, key) + if err != nil { + if errors.Is(err, os.ErrNotExist) { + err = fmt.Errorf("%simage %s: data item %q: %w", readWriteDesc, id, key, ErrImageDataMissing) + if isReadWrite { + report.Images[id] = append(report.Images[id], err) + } else { + report.ROImages[id] = append(report.ROImages[id], err) + } + return + } + err = fmt.Errorf("%simage %s: data item %q: %w", readWriteDesc, id, key, err) + if isReadWrite { + report.Images[id] = append(report.Images[id], err) + } else { + report.ROImages[id] = append(report.ROImages[id], err) + } + return + } + if int64(len(data)) != image.BigDataSizes[key] { + err = fmt.Errorf("%simage %s: data item %q: %w", readWriteDesc, id, key, ErrImageDataIncorrectSize) + if isReadWrite { + report.Images[id] = append(report.Images[id], err) + } else { + report.ROImages[id] = append(report.ROImages[id], err) + } + return + } + }() + } + } + // Walk the layers list for the image. For every layer that the image uses + // that has errors, the layer's errors are also the image's errors. + examinedImageLayers := make(map[string]struct{}) + for _, topLayer := range append([]string{image.TopLayer}, image.MappedTopLayers...) { + if topLayer == "" { + continue + } + if _, checked := examinedImageLayers[topLayer]; checked { + continue + } + examinedImageLayers[topLayer] = struct{}{} + for layer := topLayer; layer != ""; layer = report.layerParentsByLayerID[layer] { + // The referenced layer should have a corresponding entry in + // one map or the other. + _, checked := referencedLayers[layer] + _, checkedRO := referencedROLayers[layer] + if !checked && !checkedRO { + err := fmt.Errorf("layer %s: %w", layer, ErrImageLayerMissing) + err = fmt.Errorf("%simage %s: %w", readWriteDesc, id, err) + if isReadWrite { + report.Images[id] = append(report.Images[id], err) + } else { + report.ROImages[id] = append(report.ROImages[id], err) + } + } else { + // Count this layer as referenced. Whether by the + // image or one of its child layers doesn't matter + // at this point. + if _, ok := referencedLayers[layer]; ok { + referencedLayers[layer] = true + } + if _, ok := referencedROLayers[layer]; ok { + referencedROLayers[layer] = true + } + } + if isReadWrite { + if len(report.Layers[layer]) > 0 { + report.Images[id] = append(report.Images[id], report.Layers[layer]...) + } + if len(report.ROLayers[layer]) > 0 { + report.Images[id] = append(report.Images[id], report.ROLayers[layer]...) + } + } else { + if len(report.Layers[layer]) > 0 { + report.ROImages[id] = append(report.ROImages[id], report.Layers[layer]...) + } + if len(report.ROLayers[layer]) > 0 { + report.ROImages[id] = append(report.ROImages[id], report.ROLayers[layer]...) + } + } + } + } + } + return struct{}{}, false, nil + }); err != nil { + return CheckReport{}, err + } + + // Iterate over each container in turn. 
+	if _, _, err := readContainerStore(s, func() (struct{}, bool, error) {
+		containers, err := s.containerStore.Containers()
+		if err != nil {
+			return struct{}{}, true, err
+		}
+		for i := range containers {
+			container := containers[i]
+			id := container.ID
+			logrus.Debugf("checking container %s", id)
+			if options.ContainerData {
+				// Check that all of the big data items are present and reading them
+				// back gives us the right amount of data.
+				for _, key := range container.BigDataNames {
+					func() {
+						data, err := s.containerStore.BigData(id, key)
+						if err != nil {
+							if errors.Is(err, os.ErrNotExist) {
+								err = fmt.Errorf("container %s: data item %q: %w", id, key, ErrContainerDataMissing)
+								report.Containers[id] = append(report.Containers[id], err)
+								return
+							}
+							err = fmt.Errorf("container %s: data item %q: %w", id, key, err)
+							report.Containers[id] = append(report.Containers[id], err)
+							return
+						}
+						if int64(len(data)) != container.BigDataSizes[key] {
+							err = fmt.Errorf("container %s: data item %q: %w", id, key, ErrContainerDataIncorrectSize)
+							report.Containers[id] = append(report.Containers[id], err)
+							return
+						}
+					}()
+				}
+			}
+			// Look at the container's base image. If the image has errors, the image's errors
+			// are the container's errors.
+			if container.ImageID != "" {
+				if _, checked := examinedImages[container.ImageID]; !checked {
+					err := fmt.Errorf("image %s: %w", container.ImageID, ErrContainerImageMissing)
+					report.Containers[id] = append(report.Containers[id], err)
+				}
+				if len(report.Images[container.ImageID]) > 0 {
+					report.Containers[id] = append(report.Containers[id], report.Images[container.ImageID]...)
+				}
+				if len(report.ROImages[container.ImageID]) > 0 {
+					report.Containers[id] = append(report.Containers[id], report.ROImages[container.ImageID]...)
+				}
+			}
+			// Count the container's layer as referenced.
+			if container.LayerID != "" {
+				referencedLayers[container.LayerID] = true
+			}
+		}
+		return struct{}{}, false, nil
+	}); err != nil {
+		return CheckReport{}, err
+	}
+
+	// Now go back through all of the layer stores, and flag any layers which don't belong
+	// to an image or a container, and have been around longer than we can reasonably expect
+	// such a layer to be present before a corresponding image record is added.
+	if _, _, err := readAllLayerStores(s, func(store roLayerStore) (struct{}, bool, error) {
+		if isReadWrite := roLayerStoreIsReallyReadWrite(store); !isReadWrite {
+			return struct{}{}, false, nil
+		}
+		layers, err := store.Layers()
+		if err != nil {
+			return struct{}{}, true, err
+		}
+		for _, layer := range layers {
+			maximumAge := defaultMaximumUnreferencedLayerAge
+			if options.LayerUnreferencedMaximumAge != nil {
+				maximumAge = *options.LayerUnreferencedMaximumAge
+			}
+			if referenced := referencedLayers[layer.ID]; !referenced {
+				if layer.Created.IsZero() || layer.Created.Add(maximumAge).Before(time.Now()) {
+					// Either we don't (and never will) know when this layer was
+					// created, or it was created far enough in the past that we're
+					// reasonably sure it's not part of an image that's being written
+					// right now.
+					err := fmt.Errorf("layer %s: %w", layer.ID, ErrLayerUnreferenced)
+					report.Layers[layer.ID] = append(report.Layers[layer.ID], err)
+				}
+			}
+		}
+		return struct{}{}, false, nil
+	}); err != nil {
+		return CheckReport{}, err
+	}
+
+	// If the driver can tell us about which layers it knows about, we should have previously
+	// examined all of them. Any that we didn't are probably just wasted space.
+	// Note: if the driver doesn't support enumerating layers, it returns ErrNotSupported.
+	if err := s.startUsingGraphDriver(); err != nil {
+		return CheckReport{}, err
+	}
+	defer s.stopUsingGraphDriver()
+	layerList, err := s.graphDriver.ListLayers()
+	if err != nil && !errors.Is(err, drivers.ErrNotSupported) {
+		return CheckReport{}, err
+	}
+	if !errors.Is(err, drivers.ErrNotSupported) {
+		for i, id := range layerList {
+			if _, known := referencedLayers[id]; !known {
+				err := fmt.Errorf("layer %s: %w", id, ErrLayerUnaccounted)
+				report.Layers[id] = append(report.Layers[id], err)
+			}
+			report.layerOrder[id] = i + 1
+		}
+	}
+
+	return report, nil
+}
+
+func roLayerStoreIsReallyReadWrite(store roLayerStore) bool {
+	return store.(*layerStore).lockfile.IsReadWrite()
+}
+
+func roImageStoreIsReallyReadWrite(store roImageStore) bool {
+	return store.(*imageStore).lockfile.IsReadWrite()
+}
+
+// Repair removes items which are themselves damaged, or which depend on items which are damaged.
+// Errors are returned if an attempt to delete an item fails.
+func (s *store) Repair(report CheckReport, options *RepairOptions) []error {
+	if options == nil {
+		options = RepairEverything()
+	}
+	var errs []error
+	// Just delete damaged containers.
+	if options.RemoveContainers {
+		for id := range report.Containers {
+			err := s.DeleteContainer(id)
+			if err != nil && !errors.Is(err, ErrContainerUnknown) {
+				err := fmt.Errorf("deleting container %s: %w", id, err)
+				errs = append(errs, err)
+			}
+		}
+	}
+	// Now delete damaged images. Note which layers were removed as part of removing those images.
+	deletedLayers := make(map[string]struct{})
+	for id := range report.Images {
+		layers, err := s.DeleteImage(id, true)
+		if err != nil {
+			if !errors.Is(err, ErrImageUnknown) && !errors.Is(err, ErrLayerUnknown) {
+				err := fmt.Errorf("deleting image %s: %w", id, err)
+				errs = append(errs, err)
+			}
+		} else {
+			for _, layer := range layers {
+				logrus.Debugf("deleted layer %s", layer)
+				deletedLayers[layer] = struct{}{}
+			}
+			logrus.Debugf("deleted image %s", id)
+		}
+	}
+	// Build a list of the layers that we need to remove, sorted with child layers ahead of
+	// the layers that they are based on.
+ layersToDelete := make([]string, 0, len(report.Layers)) + for id := range report.Layers { + layersToDelete = append(layersToDelete, id) + } + depth := func(id string) int { + d := 0 + parent := report.layerParentsByLayerID[id] + for parent != "" { + d++ + parent = report.layerParentsByLayerID[parent] + } + return d + } + isUnaccounted := func(errs []error) bool { + for _, err := range errs { + if errors.Is(err, ErrLayerUnaccounted) { + return true + } + } + return false + } + sort.Slice(layersToDelete, func(i, j int) bool { + // we've not heard of either of them, so remove them in the order the driver suggested + if isUnaccounted(report.Layers[layersToDelete[i]]) && + isUnaccounted(report.Layers[layersToDelete[j]]) && + report.layerOrder[layersToDelete[i]] != 0 && report.layerOrder[layersToDelete[j]] != 0 { + return report.layerOrder[layersToDelete[i]] < report.layerOrder[layersToDelete[j]] + } + // always delete the one we've heard of first + if isUnaccounted(report.Layers[layersToDelete[i]]) && !isUnaccounted(report.Layers[layersToDelete[j]]) { + return false + } + // always delete the one we've heard of first + if !isUnaccounted(report.Layers[layersToDelete[i]]) && isUnaccounted(report.Layers[layersToDelete[j]]) { + return true + } + // we've heard of both of them; the one that's on the end of a longer chain goes first + return depth(layersToDelete[i]) > depth(layersToDelete[j]) // closer-to-a-notional-base layers get removed later + }) + // Now delete the layers that haven't been removed along with images. + for _, id := range layersToDelete { + if _, ok := deletedLayers[id]; ok { + continue + } + for _, reportedErr := range report.Layers[id] { + var err error + // If a layer was unaccounted for, remove it at the storage driver level. + // Otherwise, remove it at the higher level and let the higher level + // logic worry about telling the storage driver to delete the layer. 
+			if errors.Is(reportedErr, ErrLayerUnaccounted) {
+				if err = s.graphDriver.Remove(id); err != nil {
+					err = fmt.Errorf("deleting storage layer %s: %v", id, err)
+				} else {
+					logrus.Debugf("deleted storage layer %s", id)
+				}
+			} else {
+				var stillMounted bool
+				if stillMounted, err = s.Unmount(id, true); err == nil && !stillMounted {
+					logrus.Debugf("unmounted layer %s", id)
+				} else if err != nil {
+					logrus.Debugf("unmounting layer %s: %v", id, err)
+				} else {
+					logrus.Debugf("layer %s still mounted", id)
+				}
+				if err = s.DeleteLayer(id); err != nil {
+					err = fmt.Errorf("deleting layer %s: %w", id, err)
+				} else {
+					logrus.Debugf("deleted layer %s", id)
+				}
+			}
+			if err != nil && !errors.Is(err, ErrLayerUnknown) && !errors.Is(err, ErrNotALayer) && !errors.Is(err, os.ErrNotExist) {
+				errs = append(errs, err)
+			}
+		}
+	}
+	return errs
+}
+
+// compareFileInfo returns a string summarizing what's different between the two checkFileInfos
+func compareFileInfo(a, b checkFileInfo, idmap *idtools.IDMappings, ignore checkIgnore) string {
+	var comparison []string
+	if a.typeflag != b.typeflag {
+		comparison = append(comparison, fmt.Sprintf("filetype:%v→%v", a.typeflag, b.typeflag))
+	}
+	if idmap != nil && !idmap.Empty() {
+		mappedUID, mappedGID, err := idmap.ToContainer(idtools.IDPair{UID: b.uid, GID: b.gid})
+		if err != nil {
+			return err.Error()
+		}
+		b.uid, b.gid = mappedUID, mappedGID
+	}
+	if a.uid != b.uid && !ignore.ownership {
+		comparison = append(comparison, fmt.Sprintf("uid:%d→%d", a.uid, b.uid))
+	}
+	if a.gid != b.gid && !ignore.ownership {
+		comparison = append(comparison, fmt.Sprintf("gid:%d→%d", a.gid, b.gid))
+	}
+	if a.size != b.size {
+		comparison = append(comparison, fmt.Sprintf("size:%d→%d", a.size, b.size))
+	}
+	if (os.ModeType|os.ModePerm)&a.mode != (os.ModeType|os.ModePerm)&b.mode && !ignore.permissions {
+		comparison = append(comparison, fmt.Sprintf("mode:%04o→%04o", a.mode, b.mode))
+	}
+	if a.mtime != b.mtime && !ignore.timestamps {
+		comparison = append(comparison, fmt.Sprintf("mtime:0x%x→0x%x", a.mtime, b.mtime))
+	}
+	return strings.Join(comparison, ",")
+}
+
+// checkFileInfo is what we care about for files
+type checkFileInfo struct {
+	typeflag byte
+	uid, gid int
+	size     int64
+	mode     os.FileMode
+	mtime    int64 // unix-style whole seconds
+}
+
+// checkDirectory is a node in a filesystem record, possibly the top
+type checkDirectory struct {
+	directory map[string]*checkDirectory // subdirectories
+	file      map[string]checkFileInfo   // non-directories
+	checkFileInfo
+}
+
+// newCheckDirectory creates an empty checkDirectory
+func newCheckDirectory(uid, gid int, size int64, mode os.FileMode, mtime int64) *checkDirectory {
+	return &checkDirectory{
+		directory: make(map[string]*checkDirectory),
+		file:      make(map[string]checkFileInfo),
+		checkFileInfo: checkFileInfo{
+			typeflag: tar.TypeDir,
+			uid:      uid,
+			gid:      gid,
+			size:     size,
+			mode:     mode,
+			mtime:    mtime,
+		},
+	}
+}
+
+// newCheckDirectoryDefaults creates an empty checkDirectory with hardwired defaults for the UID
+// (0), GID (0), size (0) and permissions (0o555)
+func newCheckDirectoryDefaults() *checkDirectory {
+	return newCheckDirectory(0, 0, 0, 0o555, time.Now().Unix())
+}
+
+// newCheckDirectoryFromDirectory creates a checkDirectory for an on-disk directory tree
+func newCheckDirectoryFromDirectory(dir string) (*checkDirectory, error) {
+	cd := newCheckDirectoryDefaults()
+	err := filepath.Walk(dir, func(walkpath string, info os.FileInfo, err error) error {
+		if err != nil && !errors.Is(err, os.ErrNotExist) {
+			return err
+ } + rel, err := filepath.Rel(dir, walkpath) + if err != nil { + return err + } + hdr, err := tar.FileInfoHeader(info, "") // we don't record link targets, so don't bother looking it up + if err != nil { + return err + } + hdr.Name = filepath.ToSlash(rel) + cd.header(hdr) + return nil + }) + if err != nil { + return nil, err + } + return cd, nil +} + +// add adds an item to a checkDirectory +func (c *checkDirectory) add(path string, typeflag byte, uid, gid int, size int64, mode os.FileMode, mtime int64) { + components := strings.Split(path, "/") + if components[len(components)-1] == "" { + components = components[:len(components)-1] + } + if components[0] == "." { + components = components[1:] + } + if typeflag != tar.TypeReg { + size = 0 + } + switch len(components) { + case 0: + c.uid = uid + c.gid = gid + c.mode = mode + c.mtime = mtime + return + case 1: + switch typeflag { + case tar.TypeDir: + delete(c.file, components[0]) + // directory entries are mergers, not replacements + if _, present := c.directory[components[0]]; !present { + c.directory[components[0]] = newCheckDirectory(uid, gid, size, mode, mtime) + } else { + c.directory[components[0]].checkFileInfo = checkFileInfo{ + typeflag: tar.TypeDir, + uid: uid, + gid: gid, + size: size, + mode: mode, + mtime: mtime, + } + } + default: + // treat these as TypeReg items + delete(c.directory, components[0]) + c.file[components[0]] = checkFileInfo{ + typeflag: typeflag, + uid: uid, + gid: gid, + size: size, + mode: mode, + mtime: mtime, + } + case tar.TypeXGlobalHeader: + // ignore, since even though it looks like a valid pathname, it doesn't end + // up on the filesystem + } + return + } + subdirectory := c.directory[components[0]] + if subdirectory == nil { + subdirectory = newCheckDirectory(uid, gid, size, mode, mtime) + c.directory[components[0]] = subdirectory + } + subdirectory.add(strings.Join(components[1:], "/"), typeflag, uid, gid, size, mode, mtime) +} + +// remove removes an item from a checkDirectory +func (c *checkDirectory) remove(path string) { + components := strings.Split(path, "/") + if len(components) == 1 { + delete(c.directory, components[0]) + delete(c.file, components[0]) + return + } + subdirectory := c.directory[components[0]] + if subdirectory != nil { + subdirectory.remove(strings.Join(components[1:], "/")) + } +} + +// header updates a checkDirectory using information from the passed-in header +func (c *checkDirectory) header(hdr *tar.Header) { + name := path.Clean(hdr.Name) + dir, base := path.Split(name) + if strings.HasPrefix(base, archive.WhiteoutPrefix) { + if base == archive.WhiteoutOpaqueDir { + c.remove(path.Clean(dir)) + c.add(path.Clean(dir), tar.TypeDir, hdr.Uid, hdr.Gid, hdr.Size, os.FileMode(hdr.Mode), hdr.ModTime.Unix()) + } else { + c.remove(path.Join(dir, base[len(archive.WhiteoutPrefix):])) + } + } else { + if hdr.Typeflag == tar.TypeLink { + // look up the attributes of the target of the hard link + // n.b. by convention, Linkname is always relative to the + // root directory of the archive, which is not always the + // same as being relative to hdr.Name + directory := c + for _, component := range strings.Split(path.Clean(hdr.Linkname), "/") { + if component == "." || component == ".." 
{ + continue + } + if subdir, ok := directory.directory[component]; ok { + directory = subdir + continue + } + if file, ok := directory.file[component]; ok { + hdr.Typeflag = file.typeflag + hdr.Uid = file.uid + hdr.Gid = file.gid + hdr.Size = file.size + hdr.Mode = int64(file.mode) + hdr.ModTime = time.Unix(file.mtime, 0) + } + break + } + } + c.add(name, hdr.Typeflag, hdr.Uid, hdr.Gid, hdr.Size, os.FileMode(hdr.Mode), hdr.ModTime.Unix()) + } +} + +// headers updates a checkDirectory using information from the passed-in header slice +func (c *checkDirectory) headers(hdrs []*tar.Header) { + hdrs = append([]*tar.Header{}, hdrs...) + // sort the headers from the diff to ensure that whiteouts appear + // before content when they both appear in the same directory, per + // https://github.com/opencontainers/image-spec/blob/main/layer.md#whiteouts + // and that hard links appear after other types of entries + sort.SliceStable(hdrs, func(i, j int) bool { + if hdrs[i].Typeflag != tar.TypeLink && hdrs[j].Typeflag == tar.TypeLink { + return true + } + if hdrs[i].Typeflag == tar.TypeLink && hdrs[j].Typeflag != tar.TypeLink { + return false + } + idir, ifile := path.Split(hdrs[i].Name) + jdir, jfile := path.Split(hdrs[j].Name) + if idir != jdir { + return hdrs[i].Name < hdrs[j].Name + } + if ifile == archive.WhiteoutOpaqueDir { + return true + } + if strings.HasPrefix(ifile, archive.WhiteoutPrefix) && !strings.HasPrefix(jfile, archive.WhiteoutPrefix) { + return true + } + return false + }) + for _, hdr := range hdrs { + c.header(hdr) + } +} + +// names provides a sorted list of the path names in the directory tree +func (c *checkDirectory) names() []string { + names := make([]string, 0, len(c.file)+len(c.directory)) + for name := range c.file { + names = append(names, name) + } + for name, subdirectory := range c.directory { + names = append(names, name+"/") + for _, subname := range subdirectory.names() { + names = append(names, name+"/"+subname) + } + } + return names +} + +// compareCheckSubdirectory walks two subdirectory trees and returns a list of differences +func compareCheckSubdirectory(path string, a, b *checkDirectory, idmap *idtools.IDMappings, ignore checkIgnore) []string { + var diff []string + if a == nil { + a = newCheckDirectoryDefaults() + } + if b == nil { + b = newCheckDirectoryDefaults() + } + for aname, adir := range a.directory { + if bdir, present := b.directory[aname]; !present { + // directory was removed + diff = append(diff, "-"+path+"/"+aname+"/") + diff = append(diff, compareCheckSubdirectory(path+"/"+aname, adir, nil, idmap, ignore)...) + } else { + // directory is in both trees; descend + if attributes := compareFileInfo(adir.checkFileInfo, bdir.checkFileInfo, idmap, ignore); attributes != "" { + diff = append(diff, path+"/"+aname+"("+attributes+")") + } + diff = append(diff, compareCheckSubdirectory(path+"/"+aname, adir, bdir, idmap, ignore)...) + } + } + for bname, bdir := range b.directory { + if _, present := a.directory[bname]; !present { + // directory added + diff = append(diff, "+"+path+"/"+bname+"/") + diff = append(diff, compareCheckSubdirectory(path+"/"+bname, nil, bdir, idmap, ignore)...) 
+		}
+	}
+	for aname, afile := range a.file {
+		if bfile, present := b.file[aname]; !present {
+			// non-directory removed or replaced
+			diff = append(diff, "-"+path+"/"+aname)
+		} else {
+			// item is in both trees; compare
+			if attributes := compareFileInfo(afile, bfile, idmap, ignore); attributes != "" {
+				diff = append(diff, path+"/"+aname+"("+attributes+")")
+			}
+		}
+	}
+	for bname := range b.file {
+		filetype, present := a.file[bname]
+		if !present {
+			// non-directory added or replaced with something else
+			diff = append(diff, "+"+path+"/"+bname)
+			continue
+		}
+		if attributes := compareFileInfo(filetype, b.file[bname], idmap, ignore); attributes != "" {
+			// non-directory replaced with non-directory
+			diff = append(diff, "+"+path+"/"+bname+"("+attributes+")")
+		}
+	}
+	return diff
+}
+
+// compareCheckDirectory walks two directory trees and returns a sorted list of differences
+func compareCheckDirectory(a, b *checkDirectory, idmap *idtools.IDMappings, ignore checkIgnore) []string {
+	diff := compareCheckSubdirectory("", a, b, idmap, ignore)
+	sort.Slice(diff, func(i, j int) bool {
+		if c := strings.Compare(diff[i][1:], diff[j][1:]); c != 0 {
+			return c < 0
+		}
+		// for matching names, sort removals ("-") ahead of additions
+		return diff[i][0] == '-' && diff[j][0] != '-'
+	})
+	return diff
+}
diff --git a/vendor/github.com/containers/storage/containers.go b/vendor/github.com/containers/storage/containers.go
index 5866b2f98..a7dfb405b 100644
--- a/vendor/github.com/containers/storage/containers.go
+++ b/vendor/github.com/containers/storage/containers.go
@@ -523,13 +523,20 @@ func (r *containerStore) load(lockedForWriting bool) (bool, error) {
// The caller must hold r.inProcessLock for reading (but usually holds it for writing in order to make the desired changes).
func (r *containerStore) save(saveLocations containerLocations) error {
	r.lockfile.AssertLockedForWriting()
+	// This must be done before we write the file, because the process could be terminated
+	// after the file is written but before the lock file is updated.
+ lw, err := r.lockfile.RecordWrite() + if err != nil { + return err + } + r.lastWrite = lw for locationIndex := 0; locationIndex < numContainerLocationIndex; locationIndex++ { location := containerLocationFromIndex(locationIndex) if location&saveLocations == 0 { continue } rpath := r.jsonPath[locationIndex] - if err := os.MkdirAll(filepath.Dir(rpath), 0700); err != nil { + if err := os.MkdirAll(filepath.Dir(rpath), 0o700); err != nil { return err } subsetContainers := make([]*Container, 0, len(r.containers)) @@ -549,15 +556,10 @@ func (r *containerStore) save(saveLocations containerLocations) error { NoSync: true, } } - if err := ioutils.AtomicWriteFileWithOpts(rpath, jdata, 0600, opts); err != nil { + if err := ioutils.AtomicWriteFileWithOpts(rpath, jdata, 0o600, opts); err != nil { return err } } - lw, err := r.lockfile.RecordWrite() - if err != nil { - return err - } - r.lastWrite = lw return nil } @@ -569,12 +571,12 @@ func (r *containerStore) saveFor(modifiedContainer *Container) error { } func newContainerStore(dir string, runDir string, transient bool) (rwContainerStore, error) { - if err := os.MkdirAll(dir, 0700); err != nil { + if err := os.MkdirAll(dir, 0o700); err != nil { return nil, err } volatileDir := dir if transient { - if err := os.MkdirAll(runDir, 0700); err != nil { + if err := os.MkdirAll(runDir, 0o700); err != nil { return nil, err } volatileDir = runDir @@ -926,10 +928,10 @@ func (r *containerStore) SetBigData(id, key string, data []byte) error { if !ok { return ErrContainerUnknown } - if err := os.MkdirAll(r.datadir(c.ID), 0700); err != nil { + if err := os.MkdirAll(r.datadir(c.ID), 0o700); err != nil { return err } - err := ioutils.AtomicWriteFile(r.datapath(c.ID, key), data, 0600) + err := ioutils.AtomicWriteFile(r.datapath(c.ID, key), data, 0o600) if err == nil { save := false if c.BigDataSizes == nil { diff --git a/vendor/github.com/containers/storage/drivers/aufs/aufs.go b/vendor/github.com/containers/storage/drivers/aufs/aufs.go index 301ee24d2..0b1766210 100644 --- a/vendor/github.com/containers/storage/drivers/aufs/aufs.go +++ b/vendor/github.com/containers/storage/drivers/aufs/aufs.go @@ -64,7 +64,7 @@ var ( enableDirperm bool ) -const defaultPerms = os.FileMode(0555) +const defaultPerms = os.FileMode(0o555) func init() { graphdriver.MustRegister("aufs", Init) @@ -87,11 +87,9 @@ type Driver struct { // Init returns a new AUFS driver. // An error is returned if AUFS is not supported. 
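A note on the many 0600 -> 0o600 rewrites in this and the neighboring files: they follow from the gofumpt linter enabled in the .golangci.yml hunk at the top of this patch, which standardizes on the explicit 0o octal prefix introduced in Go 1.13. The two spellings are the same value, as this minimal check (not from the patch) shows:

```go
// Demonstrates that the rewritten literals are value-identical: 0o600 is
// just the explicit octal spelling that gofumpt prefers.
package main

import "fmt"

const (
	oldStyle = 0600  // legacy octal literal
	newStyle = 0o600 // explicit Go 1.13+ octal literal
)

func main() {
	fmt.Println(oldStyle == newStyle, newStyle) // prints: true 384
}
```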
func Init(home string, options graphdriver.Options) (graphdriver.Driver, error) { - // Try to load the aufs kernel module if err := supportsAufs(); err != nil { return nil, fmt.Errorf("kernel does not support aufs: %w", graphdriver.ErrNotSupported) - } fsMagic, err := graphdriver.GetFSMagic(home) @@ -145,7 +143,7 @@ func Init(home string, options graphdriver.Options) (graphdriver.Driver, error) // Create the root aufs driver dir and return // if it already exists // If not populate the dir structure - if err := idtools.MkdirAllAs(home, 0700, rootUID, rootGID); err != nil { + if err := idtools.MkdirAllAs(home, 0o700, rootUID, rootGID); err != nil { if os.IsExist(err) { return a, nil } @@ -158,7 +156,7 @@ func Init(home string, options graphdriver.Options) (graphdriver.Driver, error) // Populate the dir structure for _, p := range paths { - if err := idtools.MkdirAllAs(path.Join(home, p), 0700, rootUID, rootGID); err != nil { + if err := idtools.MkdirAllAs(path.Join(home, p), 0o700, rootUID, rootGID); err != nil { return nil, err } } @@ -251,9 +249,21 @@ func (a *Driver) Exists(id string) bool { return true } -// List layers (not including additional image stores) +// ListLayers() returns all of the layers known to the driver. func (a *Driver) ListLayers() ([]string, error) { - return nil, graphdriver.ErrNotSupported + diffsDir := filepath.Join(a.rootPath(), "diff") + entries, err := os.ReadDir(diffsDir) + if err != nil { + return nil, err + } + results := make([]string, 0, len(entries)) + for _, entry := range entries { + if !entry.IsDir() { + continue + } + results = append(results, entry.Name()) + } + return results, nil } // AdditionalImageStores returns additional image stores supported by the driver @@ -278,7 +288,6 @@ func (a *Driver) CreateReadWrite(id, parent string, opts *graphdriver.CreateOpts // Create three folders for each id // mnt, layers, and diff func (a *Driver) Create(id, parent string, opts *graphdriver.CreateOpts) error { - if opts != nil && len(opts.StorageOpt) != 0 { return fmt.Errorf("--storage-opt is not supported for aufs") } diff --git a/vendor/github.com/containers/storage/drivers/btrfs/btrfs.go b/vendor/github.com/containers/storage/drivers/btrfs/btrfs.go index 8452fa189..42d55c1a7 100644 --- a/vendor/github.com/containers/storage/drivers/btrfs/btrfs.go +++ b/vendor/github.com/containers/storage/drivers/btrfs/btrfs.go @@ -42,7 +42,7 @@ import ( "golang.org/x/sys/unix" ) -const defaultPerms = os.FileMode(0555) +const defaultPerms = os.FileMode(0o555) func init() { graphdriver.MustRegister("btrfs", Init) @@ -56,7 +56,6 @@ type btrfsOptions struct { // Init returns a new BTRFS driver. // An error is returned if BTRFS is not supported. 
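The new aufs ListLayers() above derives layer IDs from the subdirectories of the driver's "diff" directory, and the btrfs implementation later in this patch does the same with its "subvolumes" directory. A generalized sketch of that shared pattern, where listLayerIDs and the paths are illustrative rather than taken from the patch:

```go
// A driver that keeps one subdirectory per layer can enumerate layer IDs by
// listing that directory and skipping anything that is not a directory.
package main

import (
	"fmt"
	"os"
	"path/filepath"
)

// listLayerIDs is a hypothetical helper mirroring the aufs/btrfs code above.
func listLayerIDs(root, subdir string) ([]string, error) {
	entries, err := os.ReadDir(filepath.Join(root, subdir))
	if err != nil {
		return nil, err
	}
	ids := make([]string, 0, len(entries))
	for _, entry := range entries {
		if !entry.IsDir() { // stray files are not layers
			continue
		}
		ids = append(ids, entry.Name())
	}
	return ids, nil
}

func main() {
	// Example invocation using an illustrative aufs-style layout.
	ids, err := listLayerIDs("/var/lib/containers/storage/aufs", "diff")
	if err != nil {
		fmt.Println("error:", err)
		return
	}
	fmt.Println(ids)
}
```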
func Init(home string, options graphdriver.Options) (graphdriver.Driver, error) { - fsMagic, err := graphdriver.GetFSMagic(home) if err != nil { return nil, err @@ -70,7 +69,7 @@ func Init(home string, options graphdriver.Options) (graphdriver.Driver, error) if err != nil { return nil, err } - if err := idtools.MkdirAllAs(home, 0700, rootUID, rootGID); err != nil { + if err := idtools.MkdirAllAs(filepath.Join(home, "subvolumes"), 0o700, rootUID, rootGID); err != nil { return nil, err } @@ -119,7 +118,7 @@ func parseOptions(opt []string) (btrfsOptions, bool, error) { case "btrfs.mountopt": return options, userDiskQuota, fmt.Errorf("btrfs driver does not support mount options") default: - return options, userDiskQuota, fmt.Errorf("unknown option %s", key) + return options, userDiskQuota, fmt.Errorf("unknown option %s (%q)", key, option) } } return options, userDiskQuota, nil @@ -127,7 +126,7 @@ func parseOptions(opt []string) (btrfsOptions, bool, error) { // Driver contains information about the filesystem mounted. type Driver struct { - //root of the file system + // root of the file system home string uidMaps []idtools.IDMap gidMaps []idtools.IDMap @@ -226,7 +225,7 @@ func subvolSnapshot(src, dest, name string) error { var args C.struct_btrfs_ioctl_vol_args_v2 args.fd = C.__s64(getDirFd(srcDir)) - var cs = C.CString(name) + cs := C.CString(name) C.set_name_btrfs_ioctl_vol_args_v2(&args, cs) C.free(unsafe.Pointer(cs)) @@ -479,13 +478,13 @@ func (d *Driver) CreateReadWrite(id, parent string, opts *graphdriver.CreateOpts // Create the filesystem with given id. func (d *Driver) Create(id, parent string, opts *graphdriver.CreateOpts) error { - quotas := path.Join(d.home, "quotas") - subvolumes := path.Join(d.home, "subvolumes") + quotas := d.quotasDir() + subvolumes := d.subvolumesDir() rootUID, rootGID, err := idtools.GetRootUIDGID(d.uidMaps, d.gidMaps) if err != nil { return err } - if err := idtools.MkdirAllAs(subvolumes, 0700, rootUID, rootGID); err != nil { + if err := idtools.MkdirAllAs(subvolumes, 0o700, rootUID, rootGID); err != nil { return err } if parent == "" { @@ -523,10 +522,10 @@ func (d *Driver) Create(id, parent string, opts *graphdriver.CreateOpts) error { if err := d.setStorageSize(path.Join(subvolumes, id), driver); err != nil { return err } - if err := idtools.MkdirAllAs(quotas, 0700, rootUID, rootGID); err != nil { + if err := idtools.MkdirAllAs(quotas, 0o700, rootUID, rootGID); err != nil { return err } - if err := os.WriteFile(path.Join(quotas, id), []byte(fmt.Sprint(driver.options.size)), 0644); err != nil { + if err := os.WriteFile(path.Join(quotas, id), []byte(fmt.Sprint(driver.options.size)), 0o644); err != nil { return err } } @@ -560,7 +559,7 @@ func (d *Driver) parseStorageOpt(storageOpt map[string]string, driver *Driver) e } driver.options.size = uint64(size) default: - return fmt.Errorf("unknown option %s", key) + return fmt.Errorf("unknown option %s (%q)", key, storageOpt) } } @@ -629,18 +628,13 @@ func (d *Driver) Get(id string, options graphdriver.MountOpts) (string, error) { if err != nil { return "", err } - switch len(options.Options) { - case 0: - case 1: - if options.Options[0] == "ro" { + for _, opt := range options.Options { + if opt == "ro" { // ignore "ro" option - break + continue } - fallthrough - default: return "", fmt.Errorf("btrfs driver does not support mount options") } - if !st.IsDir() { return "", fmt.Errorf("%s: not a directory", dir) } @@ -679,9 +673,21 @@ func (d *Driver) Exists(id string) bool { return err == nil } -// List layers (not 
including additional image stores) +// List all of the layers known to the driver. func (d *Driver) ListLayers() ([]string, error) { - return nil, graphdriver.ErrNotSupported + subvolumesDir := filepath.Join(d.home, "subvolumes") + entries, err := os.ReadDir(subvolumesDir) + if err != nil { + return nil, err + } + results := make([]string, 0, len(entries)) + for _, entry := range entries { + if !entry.IsDir() { + continue + } + results = append(results, entry.Name()) + } + return results, nil } // AdditionalImageStores returns additional image stores supported by the driver diff --git a/vendor/github.com/containers/storage/drivers/chown_windows.go b/vendor/github.com/containers/storage/drivers/chown_windows.go index 1845a4e08..06ccf9fa4 100644 --- a/vendor/github.com/containers/storage/drivers/chown_windows.go +++ b/vendor/github.com/containers/storage/drivers/chown_windows.go @@ -10,8 +10,7 @@ import ( "github.com/containers/storage/pkg/idtools" ) -type platformChowner struct { -} +type platformChowner struct{} func newLChowner() *platformChowner { return &platformChowner{} diff --git a/vendor/github.com/containers/storage/drivers/copy/copy_linux.go b/vendor/github.com/containers/storage/drivers/copy/copy_linux.go index aa88c1a74..9c3d7c668 100644 --- a/vendor/github.com/containers/storage/drivers/copy/copy_linux.go +++ b/vendor/github.com/containers/storage/drivers/copy/copy_linux.go @@ -11,6 +11,7 @@ package copy #endif */ import "C" + import ( "container/list" "errors" diff --git a/vendor/github.com/containers/storage/drivers/counter.go b/vendor/github.com/containers/storage/drivers/counter.go index 015766676..964dcaf2f 100644 --- a/vendor/github.com/containers/storage/drivers/counter.go +++ b/vendor/github.com/containers/storage/drivers/counter.go @@ -53,7 +53,7 @@ func (c *RefCounter) incdec(path string, infoOp func(minfo *minfo)) int { } } else if !c.checker.IsMounted(path) { // if the unmount was performed outside of this process (e.g. conmon cleanup) - //the ref counter would lose track of it. Check if it is still mounted. + // the ref counter would lose track of it. Check if it is still mounted. 
m.count = 0 } infoOp(m) diff --git a/vendor/github.com/containers/storage/drivers/devmapper/device_setup.go b/vendor/github.com/containers/storage/drivers/devmapper/device_setup.go index 56c117d1b..388602b63 100644 --- a/vendor/github.com/containers/storage/drivers/devmapper/device_setup.go +++ b/vendor/github.com/containers/storage/drivers/devmapper/device_setup.go @@ -177,7 +177,7 @@ func writeLVMConfig(root string, cfg directLVMConfig) error { if err != nil { return fmt.Errorf("marshalling direct lvm config: %w", err) } - if err := os.WriteFile(p, b, 0600); err != nil { + if err := os.WriteFile(p, b, 0o600); err != nil { return fmt.Errorf("writing direct lvm config to file: %w", err) } return nil @@ -193,7 +193,7 @@ func setupDirectLVM(cfg directLVMConfig) error { } } - err := os.MkdirAll(lvmProfileDir, 0755) + err := os.MkdirAll(lvmProfileDir, 0o755) if err != nil { return fmt.Errorf("creating lvm profile directory: %w", err) } @@ -241,7 +241,7 @@ func setupDirectLVM(cfg directLVMConfig) error { } profile := fmt.Sprintf("activation{\nthin_pool_autoextend_threshold=%d\nthin_pool_autoextend_percent=%d\n}", cfg.AutoExtendThreshold, cfg.AutoExtendPercent) - err = os.WriteFile(lvmProfileDir+"/storage-thinpool.profile", []byte(profile), 0600) + err = os.WriteFile(lvmProfileDir+"/storage-thinpool.profile", []byte(profile), 0o600) if err != nil { return fmt.Errorf("writing storage thinp autoextend profile: %w", err) } diff --git a/vendor/github.com/containers/storage/drivers/devmapper/deviceset.go b/vendor/github.com/containers/storage/drivers/devmapper/deviceset.go index d2d0effc3..5d8df8a78 100644 --- a/vendor/github.com/containers/storage/drivers/devmapper/deviceset.go +++ b/vendor/github.com/containers/storage/drivers/devmapper/deviceset.go @@ -124,7 +124,7 @@ type DeviceSet struct { deletionWorkerTicker *time.Ticker uidMaps []idtools.IDMap gidMaps []idtools.IDMap - minFreeSpacePercent uint32 //min free space percentage in thinpool + minFreeSpacePercent uint32 // min free space percentage in thinpool xfsNospaceRetries string // max retries when xfs receives ENOSPC lvmSetupConfig directLVMConfig } @@ -273,7 +273,7 @@ func (devices *DeviceSet) ensureImage(name string, size int64) (string, error) { if err != nil { return "", err } - if err := idtools.MkdirAllAs(dirname, 0700, uid, gid); err != nil { + if err := idtools.MkdirAllAs(dirname, 0o700, uid, gid); err != nil { return "", err } @@ -282,7 +282,7 @@ func (devices *DeviceSet) ensureImage(name string, size int64) (string, error) { return "", err } logrus.Debugf("devmapper: Creating loopback file %s for device-manage use", filename) - file, err := os.OpenFile(filename, os.O_RDWR|os.O_CREATE, 0600) + file, err := os.OpenFile(filename, os.O_RDWR|os.O_CREATE, 0o600) if err != nil { return "", err } @@ -293,7 +293,7 @@ func (devices *DeviceSet) ensureImage(name string, size int64) (string, error) { } } else { if fi.Size() < size { - file, err := os.OpenFile(filename, os.O_RDWR|os.O_CREATE, 0600) + file, err := os.OpenFile(filename, os.O_RDWR|os.O_CREATE, 0o600) if err != nil { return "", err } @@ -421,7 +421,6 @@ func (devices *DeviceSet) constructDeviceIDMap() { } func (devices *DeviceSet) deviceFileWalkFunction(path string, name string) error { - // Skip some of the meta files which are not device files. 
if strings.HasSuffix(name, ".migrated") { logrus.Debugf("devmapper: Skipping file %s", path) @@ -458,7 +457,7 @@ func (devices *DeviceSet) loadDeviceFilesOnStart() error { logrus.Debug("devmapper: loadDeviceFilesOnStart()") defer logrus.Debug("devmapper: loadDeviceFilesOnStart() END") - var scan = func(path string, d fs.DirEntry, err error) error { + scan := func(path string, d fs.DirEntry, err error) error { if err != nil { logrus.Debugf("devmapper: Can't walk the file %s: %v", path, err) return nil @@ -1001,6 +1000,10 @@ func (devices *DeviceSet) verifyBaseDeviceUUIDFS(baseInfo *devInfo) error { devices.Lock() defer devices.Unlock() + if devices.filesystem == "" { + devices.filesystem = determineDefaultFS() + } + if err := devices.activateDeviceIfNeeded(baseInfo, false); err != nil { return err } @@ -1152,7 +1155,6 @@ func (devices *DeviceSet) setupVerifyBaseImageUUIDFS(baseInfo *devInfo) error { } func (devices *DeviceSet) checkGrowBaseDeviceFS(info *devInfo) error { - if !userBaseSize { return nil } @@ -1191,7 +1193,7 @@ func (devices *DeviceSet) growFS(info *devInfo) error { fsMountPoint := "/run/containers/storage/mnt" if _, err := os.Stat(fsMountPoint); os.IsNotExist(err) { - if err := os.MkdirAll(fsMountPoint, 0700); err != nil { + if err := os.MkdirAll(fsMountPoint, 0o700); err != nil { return err } defer os.RemoveAll(fsMountPoint) @@ -1657,7 +1659,6 @@ func (devices *DeviceSet) loadThinPoolLoopBackInfo() error { } func (devices *DeviceSet) enableDeferredRemovalDeletion() error { - // If user asked for deferred removal then check both libdm library // and kernel driver support deferred removal otherwise error out. if enableDeferredRemoval { @@ -1695,16 +1696,19 @@ func (devices *DeviceSet) initDevmapper(doInit bool) (retErr error) { } } - //create the root dir of the devmapper driver ownership to match this - //daemon's remapped root uid/gid so containers can start properly + // create the root dir of the devmapper driver ownership to match this + // daemon's remapped root uid/gid so containers can start properly uid, gid, err := idtools.GetRootUIDGID(devices.uidMaps, devices.gidMaps) if err != nil { return err } - if err := idtools.MkdirAs(devices.root, 0700, uid, gid); err != nil { + if err := idtools.MkdirAs(devices.root, 0o700, uid, gid); err != nil { + return err + } + if err := os.MkdirAll(devices.metadataDir(), 0o700); err != nil { return err } - if err := os.MkdirAll(devices.metadataDir(), 0700); err != nil { + if err := idtools.MkdirAs(filepath.Join(devices.root, "mnt"), 0o700, uid, gid); err != nil && !errors.Is(err, os.ErrExist) { return err } @@ -1811,7 +1815,7 @@ func (devices *DeviceSet) initDevmapper(doInit bool) (retErr error) { devices.dataLoopFile = data devices.dataDevice = dataFile.Name() } else { - dataFile, err = os.OpenFile(devices.dataDevice, os.O_RDWR, 0600) + dataFile, err = os.OpenFile(devices.dataDevice, os.O_RDWR, 0o600) if err != nil { return err } @@ -1844,7 +1848,7 @@ func (devices *DeviceSet) initDevmapper(doInit bool) (retErr error) { devices.metadataLoopFile = metadata devices.metadataDevice = metadataFile.Name() } else { - metadataFile, err = os.OpenFile(devices.metadataDevice, os.O_RDWR, 0600) + metadataFile, err = os.OpenFile(devices.metadataDevice, os.O_RDWR, 0o600) if err != nil { return err } @@ -1966,7 +1970,6 @@ func (devices *DeviceSet) AddDevice(hash, baseHash string, storageOpt map[string } func (devices *DeviceSet) parseStorageOpt(storageOpt map[string]string) (uint64, error) { - // Read size to change the block device size per 
container. for key, val := range storageOpt { key := strings.ToLower(key) @@ -2317,7 +2320,7 @@ func (devices *DeviceSet) Shutdown(home string) error { info.lock.Lock() devices.Lock() if err := devices.deactivateDevice(info); err != nil { - logrus.Debugf("devmapper: Shutdown deactivate base , error: %s", err) + logrus.Debugf("devmapper: Shutdown deactivate base, error: %s", err) } devices.Unlock() info.lock.Unlock() @@ -2326,7 +2329,7 @@ func (devices *DeviceSet) Shutdown(home string) error { devices.Lock() if devices.thinPoolDevice == "" { if err := devices.deactivatePool(); err != nil { - logrus.Debugf("devmapper: Shutdown deactivate pool , error: %s", err) + logrus.Debugf("devmapper: Shutdown deactivate pool, error: %s", err) } } devices.Unlock() @@ -2483,6 +2486,26 @@ func (devices *DeviceSet) List() []string { return ids } +// ListLayers returns a list of device IDs, omitting the ""/"base" device and +// any which have been marked as deleted. +func (devices *DeviceSet) ListLayers() ([]string, error) { + if err := devices.cleanupDeletedDevices(); err != nil { + return nil, err + } + + devices.Lock() + defer devices.Unlock() + + ids := make([]string, 0, len(devices.Devices)) + for k, d := range devices.Devices { + if k == "" || d.Deleted { + continue + } + ids = append(ids, k) + } + return ids, nil +} + func (devices *DeviceSet) deviceStatus(devName string) (sizeInSectors, mappedSectors, highestMappedSector uint64, err error) { var params string _, sizeInSectors, _, params, err = devicemapper.GetStatus(devName) @@ -2520,7 +2543,6 @@ func (devices *DeviceSet) GetDeviceStatus(hash string) (*DevStatus, error) { } sizeInSectors, mappedSectors, highestMappedSector, err := devices.deviceStatus(info.DevName()) - if err != nil { return nil, err } diff --git a/vendor/github.com/containers/storage/drivers/devmapper/driver.go b/vendor/github.com/containers/storage/drivers/devmapper/driver.go index 8b3ee51df..8b8a1d177 100644 --- a/vendor/github.com/containers/storage/drivers/devmapper/driver.go +++ b/vendor/github.com/containers/storage/drivers/devmapper/driver.go @@ -20,7 +20,7 @@ import ( "golang.org/x/sys/unix" ) -const defaultPerms = os.FileMode(0555) +const defaultPerms = os.FileMode(0o555) func init() { graphdriver.MustRegister("devicemapper", Init) @@ -55,7 +55,6 @@ func Init(home string, options graphdriver.Options) (graphdriver.Driver, error) ctr: graphdriver.NewRefCounter(graphdriver.NewDefaultChecker()), locker: locker.New(), } - return graphdriver.NewNaiveDiffDriver(d, graphdriver.NewNaiveLayerIDMapUpdater(d)), nil } @@ -103,7 +102,6 @@ func (d *Driver) Status() [][2]string { // Metadata returns a map of information about the device. 
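The DeviceSet.ListLayers added above differs from the older List in two ways: it first flushes devices already queued for deletion, and it filters out the "" (base) device plus anything still flagged Deleted, so callers such as the layer-store garbage collector only ever see removable layers. A minimal standalone sketch of the same filter, with a hypothetical device type standing in for the vendored devInfo:

package main

import "fmt"

type device struct{ Deleted bool }

// listLayers mirrors the new DeviceSet.ListLayers filter: skip the ""/base
// entry and any device already marked as deleted.
func listLayers(devices map[string]*device) []string {
	ids := make([]string, 0, len(devices))
	for k, d := range devices {
		if k == "" || d.Deleted {
			continue
		}
		ids = append(ids, k)
	}
	return ids
}

func main() {
	m := map[string]*device{"": {}, "a": {}, "b": {Deleted: true}}
	fmt.Println(listLayers(m)) // [a]
}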
func (d *Driver) Metadata(id string) (map[string]string, error) { m, err := d.DeviceSet.exportDeviceMetadata(id) - if err != nil { return nil, err } @@ -202,11 +200,11 @@ func (d *Driver) Get(id string, options graphdriver.MountOpts) (string, error) { } // Create the target directories if they don't exist - if err := idtools.MkdirAllAs(path.Join(d.home, "mnt"), 0755, uid, gid); err != nil { + if err := idtools.MkdirAllAs(path.Join(d.home, "mnt"), 0o755, uid, gid); err != nil { d.ctr.Decrement(mp) return "", err } - if err := idtools.MkdirAs(mp, 0755, uid, gid); err != nil && !os.IsExist(err) { + if err := idtools.MkdirAs(mp, 0o755, uid, gid); err != nil && !os.IsExist(err) { d.ctr.Decrement(mp) return "", err } @@ -227,7 +225,7 @@ func (d *Driver) Get(id string, options graphdriver.MountOpts) (string, error) { if _, err := os.Stat(idFile); err != nil && os.IsNotExist(err) { // Create an "id" file with the container/image id in it to help reconstruct this in case // of later problems - if err := os.WriteFile(idFile, []byte(id), 0600); err != nil { + if err := os.WriteFile(idFile, []byte(id), 0o600); err != nil { d.ctr.Decrement(mp) d.DeviceSet.UnmountDevice(id, mp) return "", err @@ -267,11 +265,6 @@ func (d *Driver) Exists(id string) bool { return d.DeviceSet.HasDevice(id) } -// List layers (not including additional image stores) -func (d *Driver) ListLayers() ([]string, error) { - return nil, graphdriver.ErrNotSupported -} - // AdditionalImageStores returns additional image stores supported by the driver func (d *Driver) AdditionalImageStores() []string { return nil diff --git a/vendor/github.com/containers/storage/drivers/driver.go b/vendor/github.com/containers/storage/drivers/driver.go index 055d99d18..f7b0d6891 100644 --- a/vendor/github.com/containers/storage/drivers/driver.go +++ b/vendor/github.com/containers/storage/drivers/driver.go @@ -111,6 +111,10 @@ type ProtoDriver interface { Exists(id string) bool // Returns a list of layer ids that exist on this driver (does not include // additional storage layers). Not supported by all backends. + // If the driver requires that layers be removed in a particular order, + // usually due to parent-child relationships that it cares about, the + // list should be sorted well enough so that if all layers need to be + // removed, they can be removed in the order in which they're returned. ListLayers() ([]string, error) // Status returns a set of key-value pairs which give low // level diagnostic status about this driver. @@ -183,6 +187,8 @@ type DriverWithDifferOutput struct { UncompressedDigest digest.Digest Metadata string BigData map[string][]byte + TarSplit []byte + TOCDigest digest.Digest } // Differ defines the interface for using a custom differ.
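Aside: most of the mechanical churn across these vendored files is the 0o octal prefix (Go 1.13+, preferred by gofumpt) replacing the bare leading zero; the permission values themselves are unchanged, as this standalone snippet illustrates:

package main

import "fmt"

func main() {
	// 0600 (legacy) and 0o600 (Go 1.13+) denote the same untyped constant;
	// only the spelling of the octal base changes.
	fmt.Println(0600 == 0o600)             // true
	fmt.Printf("%#o = %d\n", 0o600, 0o600) // 0600 = 384
}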
@@ -322,6 +328,7 @@ func getBuiltinDriver(name, home string, options Options) (Driver, error) { type Options struct { Root string RunRoot string + ImageStore string DriverPriority []string DriverOptions []string UIDMaps []idtools.IDMap @@ -337,12 +344,12 @@ func New(name string, config Options) (Driver, error) { } // Guess for prior driver - driversMap := scanPriorDrivers(config.Root) + driversMap := ScanPriorDrivers(config.Root) // use the supplied priority list unless it is empty prioList := config.DriverPriority if len(prioList) == 0 { - prioList = priority + prioList = Priority } for _, name := range prioList { @@ -414,12 +421,12 @@ func isDriverNotSupported(err error) bool { } // scanPriorDrivers returns an un-ordered scan of directories of prior storage drivers -func scanPriorDrivers(root string) map[string]bool { +func ScanPriorDrivers(root string) map[string]bool { driversMap := make(map[string]bool) for driver := range drivers { p := filepath.Join(root, driver) - if _, err := os.Stat(p); err == nil && driver != "vfs" { + if _, err := os.Stat(p); err == nil { driversMap[driver] = true } } diff --git a/vendor/github.com/containers/storage/drivers/driver_darwin.go b/vendor/github.com/containers/storage/drivers/driver_darwin.go index 357851543..b60883a9e 100644 --- a/vendor/github.com/containers/storage/drivers/driver_darwin.go +++ b/vendor/github.com/containers/storage/drivers/driver_darwin.go @@ -1,11 +1,9 @@ package graphdriver -var ( - // Slice of drivers that should be used in order - priority = []string{ - "vfs", - } -) +// Slice of drivers that should be used in order +var Priority = []string{ + "vfs", +} // GetFSMagic returns the filesystem id given the path. func GetFSMagic(rootpath string) (FsMagic, error) { diff --git a/vendor/github.com/containers/storage/drivers/driver_freebsd.go b/vendor/github.com/containers/storage/drivers/driver_freebsd.go index 143cccf92..a6072ab56 100644 --- a/vendor/github.com/containers/storage/drivers/driver_freebsd.go +++ b/vendor/github.com/containers/storage/drivers/driver_freebsd.go @@ -13,7 +13,7 @@ const ( var ( // Slice of drivers that should be used in an order - priority = []string{ + Priority = []string{ "zfs", "vfs", } @@ -31,8 +31,7 @@ func NewDefaultChecker() Checker { return &defaultChecker{} } -type defaultChecker struct { -} +type defaultChecker struct{} func (c *defaultChecker) IsMounted(path string) bool { m, _ := mount.Mounted(path) diff --git a/vendor/github.com/containers/storage/drivers/driver_linux.go b/vendor/github.com/containers/storage/drivers/driver_linux.go index b9e57a60d..cd806b8ff 100644 --- a/vendor/github.com/containers/storage/drivers/driver_linux.go +++ b/vendor/github.com/containers/storage/drivers/driver_linux.go @@ -60,6 +60,8 @@ const ( FsMagicCephFs = FsMagic(0x00C36400) // FsMagicCIFS filesystem id for CIFS FsMagicCIFS = FsMagic(0xFF534D42) + // FsMagicEROFS filesystem id for EROFS + FsMagicEROFS = FsMagic(0xE0F5E1E2) // FsMagicFHGFS filesystem id for FHGFS FsMagicFHGFSFs = FsMagic(0x19830326) // FsMagicIBRIX filesystem id for IBRIX @@ -90,7 +92,7 @@ const ( var ( // Slice of drivers that should be used in an order - priority = []string{ + Priority = []string{ "overlay", // We don't support devicemapper without configuration // "devicemapper", @@ -106,6 +108,7 @@ var ( FsMagicBtrfs: "btrfs", FsMagicCramfs: "cramfs", FsMagicEcryptfs: "ecryptfs", + FsMagicEROFS: "erofs", FsMagicExtfs: "extfs", FsMagicF2fs: "f2fs", FsMagicGPFS: "gpfs", @@ -161,8 +164,7 @@ func NewDefaultChecker() Checker { return 
&defaultChecker{} } -type defaultChecker struct { -} +type defaultChecker struct{} func (c *defaultChecker) IsMounted(path string) bool { m, _ := mount.Mounted(path) diff --git a/vendor/github.com/containers/storage/drivers/driver_solaris.go b/vendor/github.com/containers/storage/drivers/driver_solaris.go index 962edd176..6b6373a37 100644 --- a/vendor/github.com/containers/storage/drivers/driver_solaris.go +++ b/vendor/github.com/containers/storage/drivers/driver_solaris.go @@ -16,6 +16,7 @@ static inline struct statvfs *getstatfs(char *s) { } */ import "C" + import ( "path/filepath" "unsafe" @@ -31,7 +32,7 @@ const ( var ( // Slice of drivers that should be used in an order - priority = []string{ + Priority = []string{ "zfs", } @@ -69,8 +70,7 @@ func NewDefaultChecker() Checker { return &defaultChecker{} } -type defaultChecker struct { -} +type defaultChecker struct{} func (c *defaultChecker) IsMounted(path string) bool { m, _ := mount.Mounted(path) @@ -80,7 +80,6 @@ func (c *defaultChecker) IsMounted(path string) bool { // Mounted checks if the given path is mounted as the fs type // Solaris supports only ZFS for now func Mounted(fsType FsMagic, mountPath string) (bool, error) { - cs := C.CString(filepath.Dir(mountPath)) defer C.free(unsafe.Pointer(cs)) buf := C.getstatfs(cs) diff --git a/vendor/github.com/containers/storage/drivers/driver_unsupported.go b/vendor/github.com/containers/storage/drivers/driver_unsupported.go index 8119d9a6c..7dfbef007 100644 --- a/vendor/github.com/containers/storage/drivers/driver_unsupported.go +++ b/vendor/github.com/containers/storage/drivers/driver_unsupported.go @@ -3,12 +3,10 @@ package graphdriver -var ( - // Slice of drivers that should be used in an order - priority = []string{ - "unsupported", - } -) +// Slice of drivers that should be used in an order +var Priority = []string{ + "unsupported", +} // GetFSMagic returns the filesystem id given the path. func GetFSMagic(rootpath string) (FsMagic, error) { diff --git a/vendor/github.com/containers/storage/drivers/driver_windows.go b/vendor/github.com/containers/storage/drivers/driver_windows.go index ffd30c295..54bd139a3 100644 --- a/vendor/github.com/containers/storage/drivers/driver_windows.go +++ b/vendor/github.com/containers/storage/drivers/driver_windows.go @@ -1,11 +1,9 @@ package graphdriver -var ( - // Slice of drivers that should be used in order - priority = []string{ - "windowsfilter", - } -) +// Slice of drivers that should be used in order +var Priority = []string{ + "windowsfilter", +} // GetFSMagic returns the filesystem id given the path. func GetFSMagic(rootpath string) (FsMagic, error) { diff --git a/vendor/github.com/containers/storage/drivers/fsdiff.go b/vendor/github.com/containers/storage/drivers/fsdiff.go index 6b2496ec5..fba9ec4fc 100644 --- a/vendor/github.com/containers/storage/drivers/fsdiff.go +++ b/vendor/github.com/containers/storage/drivers/fsdiff.go @@ -14,11 +14,9 @@ import ( "github.com/sirupsen/logrus" ) -var ( - // ApplyUncompressedLayer defines the unpack method used by the graph - // driver. - ApplyUncompressedLayer = chrootarchive.ApplyUncompressedLayer -) +// ApplyUncompressedLayer defines the unpack method used by the graph +// driver. 
+var ApplyUncompressedLayer = chrootarchive.ApplyUncompressedLayer // NaiveDiffDriver takes a ProtoDriver and adds the // capability of the Diffing methods which it may or may not @@ -57,6 +55,7 @@ func (gdw *NaiveDiffDriver) Diff(id string, idMappings *idtools.IDMappings, pare options := MountOpts{ MountLabel: mountLabel, + Options: []string{"ro"}, } layerFs, err := driver.Get(id, options) if err != nil { @@ -173,7 +172,7 @@ func (gdw *NaiveDiffDriver) ApplyDiff(id, parent string, options ApplyDiffOpts) } defer driverPut(driver, id, &err) - defaultForceMask := os.FileMode(0700) + defaultForceMask := os.FileMode(0o700) var forceMask *os.FileMode // = nil if runtime.GOOS == "darwin" { forceMask = &defaultForceMask diff --git a/vendor/github.com/containers/storage/drivers/overlay/check.go b/vendor/github.com/containers/storage/drivers/overlay/check.go index 437112742..60980994b 100644 --- a/vendor/github.com/containers/storage/drivers/overlay/check.go +++ b/vendor/github.com/containers/storage/drivers/overlay/check.go @@ -38,22 +38,22 @@ func doesSupportNativeDiff(d, mountOpts string) error { }() // Make directories l1/d, l1/d1, l2/d, l3, work, merged - if err := os.MkdirAll(filepath.Join(td, "l1", "d"), 0755); err != nil { + if err := os.MkdirAll(filepath.Join(td, "l1", "d"), 0o755); err != nil { return err } - if err := os.MkdirAll(filepath.Join(td, "l1", "d1"), 0755); err != nil { + if err := os.MkdirAll(filepath.Join(td, "l1", "d1"), 0o755); err != nil { return err } - if err := os.MkdirAll(filepath.Join(td, "l2", "d"), 0755); err != nil { + if err := os.MkdirAll(filepath.Join(td, "l2", "d"), 0o755); err != nil { return err } - if err := os.Mkdir(filepath.Join(td, "l3"), 0755); err != nil { + if err := os.Mkdir(filepath.Join(td, "l3"), 0o755); err != nil { return err } - if err := os.Mkdir(filepath.Join(td, "work"), 0755); err != nil { + if err := os.Mkdir(filepath.Join(td, "work"), 0o755); err != nil { return err } - if err := os.Mkdir(filepath.Join(td, "merged"), 0755); err != nil { + if err := os.Mkdir(filepath.Join(td, "merged"), 0o755); err != nil { return err } @@ -82,7 +82,7 @@ func doesSupportNativeDiff(d, mountOpts string) error { }() // Touch file in d to force copy up of opaque directory "d" from "l2" to "l3" - if err := os.WriteFile(filepath.Join(td, "merged", "d", "f"), []byte{}, 0644); err != nil { + if err := os.WriteFile(filepath.Join(td, "merged", "d", "f"), []byte{}, 0o644); err != nil { return fmt.Errorf("failed to write to merged directory: %w", err) } @@ -132,19 +132,19 @@ func doesMetacopy(d, mountOpts string) (bool, error) { }() // Make directories l1, l2, work, merged - if err := os.MkdirAll(filepath.Join(td, "l1"), 0755); err != nil { + if err := os.MkdirAll(filepath.Join(td, "l1"), 0o755); err != nil { return false, err } - if err := ioutils.AtomicWriteFile(filepath.Join(td, "l1", "f"), []byte{0xff}, 0700); err != nil { + if err := ioutils.AtomicWriteFile(filepath.Join(td, "l1", "f"), []byte{0xff}, 0o700); err != nil { return false, err } - if err := os.MkdirAll(filepath.Join(td, "l2"), 0755); err != nil { + if err := os.MkdirAll(filepath.Join(td, "l2"), 0o755); err != nil { return false, err } - if err := os.Mkdir(filepath.Join(td, "work"), 0755); err != nil { + if err := os.Mkdir(filepath.Join(td, "work"), 0o755); err != nil { return false, err } - if err := os.Mkdir(filepath.Join(td, "merged"), 0755); err != nil { + if err := os.Mkdir(filepath.Join(td, "merged"), 0o755); err != nil { return false, err } // Mount using the mandatory options and configured 
options @@ -170,7 +170,7 @@ func doesMetacopy(d, mountOpts string) (bool, error) { }() // Make a change that only impacts the inode, and check if the pulled-up copy is marked // as a metadata-only copy - if err := os.Chmod(filepath.Join(td, "merged", "f"), 0600); err != nil { + if err := os.Chmod(filepath.Join(td, "merged", "f"), 0o600); err != nil { return false, fmt.Errorf("changing permissions on file for metacopy check: %w", err) } metacopy, err := system.Lgetxattr(filepath.Join(td, "l2", "f"), archive.GetOverlayXattrName("metacopy")) @@ -196,20 +196,23 @@ func doesVolatile(d string) (bool, error) { } }() - if err := os.MkdirAll(filepath.Join(td, "lower"), 0755); err != nil { + if err := os.MkdirAll(filepath.Join(td, "lower"), 0o755); err != nil { return false, err } - if err := os.MkdirAll(filepath.Join(td, "upper"), 0755); err != nil { + if err := os.MkdirAll(filepath.Join(td, "upper"), 0o755); err != nil { return false, err } - if err := os.Mkdir(filepath.Join(td, "work"), 0755); err != nil { + if err := os.Mkdir(filepath.Join(td, "work"), 0o755); err != nil { return false, err } - if err := os.Mkdir(filepath.Join(td, "merged"), 0755); err != nil { + if err := os.Mkdir(filepath.Join(td, "merged"), 0o755); err != nil { return false, err } // Mount using the mandatory options and configured options opts := fmt.Sprintf("volatile,lowerdir=%s,upperdir=%s,workdir=%s", path.Join(td, "lower"), path.Join(td, "upper"), path.Join(td, "work")) + if unshare.IsRootless() { + opts = fmt.Sprintf("%s,userxattr", opts) + } if err := unix.Mount("overlay", filepath.Join(td, "merged"), "overlay", 0, opts); err != nil { return false, fmt.Errorf("failed to mount overlay for volatile check: %w", err) } @@ -238,11 +241,11 @@ func supportsIdmappedLowerLayers(home string) (bool, error) { upperDir := filepath.Join(layerDir, "upper") workDir := filepath.Join(layerDir, "work") - _ = idtools.MkdirAs(mergedDir, 0700, 0, 0) - _ = idtools.MkdirAs(lowerDir, 0700, 0, 0) - _ = idtools.MkdirAs(lowerMappedDir, 0700, 0, 0) - _ = idtools.MkdirAs(upperDir, 0700, 0, 0) - _ = idtools.MkdirAs(workDir, 0700, 0, 0) + _ = idtools.MkdirAs(mergedDir, 0o700, 0, 0) + _ = idtools.MkdirAs(lowerDir, 0o700, 0, 0) + _ = idtools.MkdirAs(lowerMappedDir, 0o700, 0, 0) + _ = idtools.MkdirAs(upperDir, 0o700, 0, 0) + _ = idtools.MkdirAs(workDir, 0o700, 0, 0) mapping := []idtools.IDMap{ { diff --git a/vendor/github.com/containers/storage/drivers/overlay/mount.go b/vendor/github.com/containers/storage/drivers/overlay/mount.go index de47951d4..33e60b118 100644 --- a/vendor/github.com/containers/storage/drivers/overlay/mount.go +++ b/vendor/github.com/containers/storage/drivers/overlay/mount.go @@ -55,7 +55,7 @@ func mountOverlayFrom(dir, device, target, mType string, flags uintptr, label st w.Close() return fmt.Errorf("mountfrom error on re-exec cmd: %w", err) } - //write the options to the pipe for the untar exec to read + // write the options to the pipe for the untar exec to read if err := json.NewEncoder(w).Encode(options); err != nil { w.Close() return fmt.Errorf("mountfrom json encode to pipe failed: %w", err) diff --git a/vendor/github.com/containers/storage/drivers/overlay/overlay.go b/vendor/github.com/containers/storage/drivers/overlay/overlay.go index b606713f0..1ef7122c5 100644 --- a/vendor/github.com/containers/storage/drivers/overlay/overlay.go +++ b/vendor/github.com/containers/storage/drivers/overlay/overlay.go @@ -29,7 +29,6 @@ import ( "github.com/containers/storage/pkg/idtools" "github.com/containers/storage/pkg/mount" 
"github.com/containers/storage/pkg/parsers" - "github.com/containers/storage/pkg/stringid" "github.com/containers/storage/pkg/system" "github.com/containers/storage/pkg/unshare" units "github.com/docker/go-units" @@ -41,13 +40,13 @@ import ( "golang.org/x/sys/unix" ) -var ( - // untar defines the untar method - untar = chrootarchive.UntarUncompressed -) +// untar defines the untar method +var untar = chrootarchive.UntarUncompressed const ( - defaultPerms = os.FileMode(0555) + defaultPerms = os.FileMode(0o555) + selinuxLabelTest = "system_u:object_r:container_file_t:s0" + mountProgramFlagFile = ".has-mount-program" ) // This backend uses the overlay union filesystem for containers @@ -78,9 +77,10 @@ const ( // that mounts do not fail due to length. const ( - linkDir = "l" - lowerFile = "lower" - maxDepth = 500 + linkDir = "l" + stagingDir = "staging" + lowerFile = "lower" + maxDepth = 500 // idLength represents the number of random characters // which can be used to create the unique link identifier @@ -110,6 +110,7 @@ type Driver struct { name string home string runhome string + imageStore string uidMaps []idtools.IDMap gidMaps []idtools.IDMap ctr *graphdriver.RefCounter @@ -124,7 +125,6 @@ type Driver struct { } type additionalLayerStore struct { - // path is the directory where this store is available on the host. path string @@ -175,11 +175,11 @@ func hasVolatileOption(opts []string) bool { } func getMountProgramFlagFile(path string) string { - return filepath.Join(path, ".has-mount-program") + return filepath.Join(path, mountProgramFlagFile) } func checkSupportVolatile(home, runhome string) (bool, error) { - feature := fmt.Sprintf("volatile") + const feature = "volatile" volatileCacheResult, _, err := cachedFeatureCheck(runhome, feature) var usingVolatile bool if err == nil { @@ -200,6 +200,8 @@ func checkSupportVolatile(home, runhome string) (bool, error) { if err = cachedFeatureRecord(runhome, feature, usingVolatile, ""); err != nil { return false, fmt.Errorf("recording volatile-being-used status: %w", err) } + } else { + usingVolatile = false } } return usingVolatile, nil @@ -303,6 +305,16 @@ func isNetworkFileSystem(fsMagic graphdriver.FsMagic) bool { // If overlay filesystem is not supported on the host, a wrapped graphdriver.ErrNotSupported is returned as error. // If an overlay filesystem is not supported over an existing filesystem then a wrapped graphdriver.ErrIncompatibleFS is returned. func Init(home string, options graphdriver.Options) (graphdriver.Driver, error) { + // If custom --imagestore is selected never + // ditch the original graphRoot, instead add it as + // additionalImageStore so its images can still be + // read and used. 
+ if options.ImageStore != "" { + graphRootAsAdditionalStore := fmt.Sprintf("AdditionalImageStore=%s", options.ImageStore) + options.DriverOptions = append(options.DriverOptions, graphRootAsAdditionalStore) + // complete base name with driver name included + options.ImageStore = filepath.Join(options.ImageStore, "overlay") + } opts, err := parseOptions(options.DriverOptions) if err != nil { return nil, err @@ -325,11 +337,17 @@ func Init(home string, options graphdriver.Options) (graphdriver.Driver, error) } // Create the driver home dir - if err := idtools.MkdirAllAs(path.Join(home, linkDir), 0755, 0, 0); err != nil { + if err := idtools.MkdirAllAs(path.Join(home, linkDir), 0o755, 0, 0); err != nil { return nil, err } - if err := idtools.MkdirAllAs(runhome, 0700, rootUID, rootGID); err != nil { + if options.ImageStore != "" { + if err := idtools.MkdirAllAs(path.Join(options.ImageStore, linkDir), 0o755, 0, 0); err != nil { + return nil, err + } + } + + if err := idtools.MkdirAllAs(runhome, 0o700, rootUID, rootGID); err != nil { return nil, err } @@ -345,12 +363,12 @@ func Init(home string, options graphdriver.Options) (graphdriver.Driver, error) if opts.mountProgram != "" { if unshare.IsRootless() && isNetworkFileSystem(fsMagic) && opts.forceMask == nil { - m := os.FileMode(0700) + m := os.FileMode(0o700) opts.forceMask = &m logrus.Warnf("Network file system detected as backing store. Enforcing overlay option `force_mask=\"%o\"`. Add it to storage.conf to silence this warning", m) } - if err := os.WriteFile(getMountProgramFlagFile(home), []byte("true"), 0600); err != nil { + if err := os.WriteFile(getMountProgramFlagFile(home), []byte("true"), 0o600); err != nil { return nil, err } } else { @@ -420,6 +438,7 @@ func Init(home string, options graphdriver.Options) (graphdriver.Driver, error) d := &Driver{ name: "overlay", home: home, + imageStore: options.ImageStore, runhome: runhome, uidMaps: options.UIDMaps, gidMaps: options.GIDMaps, @@ -560,9 +579,9 @@ func parseOptions(options []string) (*overlayOptions, error) { var mask int64 switch val { case "shared": - mask = 0755 + mask = 0o755 case "private": - mask = 0700 + mask = 0o700 default: mask, err = strconv.ParseInt(val, 8, 32) if err != nil { @@ -627,7 +646,7 @@ func SupportsNativeOverlay(home, runhome string) (bool, error) { if err != nil && !os.IsNotExist(err) { return false, err } - if err := os.WriteFile(getMountProgramFlagFile(home), []byte(fmt.Sprintf("%t", needsMountProgram)), 0600); err != nil && !os.IsNotExist(err) { + if err := os.WriteFile(getMountProgramFlagFile(home), []byte(fmt.Sprintf("%t", needsMountProgram)), 0o600); err != nil && !os.IsNotExist(err) { return false, err } if needsMountProgram { @@ -640,7 +659,7 @@ func SupportsNativeOverlay(home, runhome string) (bool, error) { for _, dir := range []string{home, runhome} { if _, err := os.Stat(dir); err != nil { - _ = idtools.MkdirAllAs(dir, 0700, 0, 0) + _ = idtools.MkdirAllAs(dir, 0o700, 0, 0) } } @@ -700,12 +719,12 @@ func supportsOverlay(home string, homeMagic graphdriver.FsMagic, rootUID, rootGI _ = os.RemoveAll(layerDir) _ = os.Remove(home) }() - _ = idtools.MkdirAs(mergedDir, 0700, rootUID, rootGID) - _ = idtools.MkdirAs(lower1Dir, 0700, rootUID, rootGID) - _ = idtools.MkdirAs(lower2Dir, 0700, rootUID, rootGID) - _ = idtools.MkdirAs(lower2Subdir, 0700, rootUID, rootGID) - _ = idtools.MkdirAs(upperDir, 0700, rootUID, rootGID) - _ = idtools.MkdirAs(workDir, 0700, rootUID, rootGID) + _ = idtools.MkdirAs(mergedDir, 0o700, rootUID, rootGID) + _ = 
idtools.MkdirAs(lower1Dir, 0o700, rootUID, rootGID) + _ = idtools.MkdirAs(lower2Dir, 0o700, rootUID, rootGID) + _ = idtools.MkdirAs(lower2Subdir, 0o700, rootUID, rootGID) + _ = idtools.MkdirAs(upperDir, 0o700, rootUID, rootGID) + _ = idtools.MkdirAs(workDir, 0o700, rootUID, rootGID) f, err := os.Create(lower2SubdirFile) if err != nil { logrus.Debugf("Unable to create test file: %v", err) @@ -723,7 +742,7 @@ func supportsOverlay(home string, homeMagic graphdriver.FsMagic, rootUID, rootGI if unshare.IsRootless() { flags = fmt.Sprintf("%s,userxattr", flags) } - if err := syscall.Mknod(filepath.Join(upperDir, "whiteout"), syscall.S_IFCHR|0600, int(unix.Mkdev(0, 0))); err != nil { + if err := syscall.Mknod(filepath.Join(upperDir, "whiteout"), syscall.S_IFCHR|0o600, int(unix.Mkdev(0, 0))); err != nil { logrus.Debugf("Unable to create kernel-style whiteout: %v", err) return supportsDType, fmt.Errorf("unable to create kernel-style whiteout: %w", err) } @@ -806,15 +825,22 @@ func (d *Driver) Status() [][2]string { // Metadata returns meta data about the overlay driver such as // LowerDir, UpperDir, WorkDir and MergeDir used to store data. func (d *Driver) Metadata(id string) (map[string]string, error) { - dir := d.dir(id) + dir, imagestore, _ := d.dir2(id) if _, err := os.Stat(dir); err != nil { return nil, err } + workDirBase := dir + if imagestore != "" { + if _, err := os.Stat(dir); err != nil { + return nil, err + } + workDirBase = imagestore + } metadata := map[string]string{ - "WorkDir": path.Join(dir, "work"), - "MergedDir": path.Join(dir, "merged"), - "UpperDir": path.Join(dir, "diff"), + "WorkDir": path.Join(workDirBase, "work"), + "MergedDir": path.Join(workDirBase, "merged"), + "UpperDir": path.Join(workDirBase, "diff"), } lowerDirs, err := d.getLowerDirs(id) @@ -929,7 +955,7 @@ func (d *Driver) Create(id, parent string, opts *graphdriver.CreateOpts) (retErr } func (d *Driver) create(id, parent string, opts *graphdriver.CreateOpts, disableQuota bool) (retErr error) { - dir := d.dir(id) + dir, imageStore, _ := d.dir2(id) uidMaps := d.uidMaps gidMaps := d.gidMaps @@ -940,7 +966,7 @@ func (d *Driver) create(id, parent string, opts *graphdriver.CreateOpts, disable } // Make the link directory if it does not exist - if err := idtools.MkdirAllAs(path.Join(d.home, linkDir), 0755, 0, 0); err != nil { + if err := idtools.MkdirAllAs(path.Join(d.home, linkDir), 0o755, 0, 0); err != nil { return err } @@ -954,11 +980,22 @@ func (d *Driver) create(id, parent string, opts *graphdriver.CreateOpts, disable GID: rootGID, } - if err := idtools.MkdirAllAndChownNew(path.Dir(dir), 0755, idPair); err != nil { + if err := idtools.MkdirAllAndChownNew(path.Dir(dir), 0o755, idPair); err != nil { return err } + workDirBase := dir + if imageStore != "" { + workDirBase = imageStore + if err := idtools.MkdirAllAndChownNew(path.Dir(imageStore), 0o755, idPair); err != nil { + return err + } + } if parent != "" { - st, err := system.Stat(d.dir(parent)) + parentBase, parentImageStore, _ := d.dir2(parent) + if parentImageStore != "" { + parentBase = parentImageStore + } + st, err := system.Stat(filepath.Join(parentBase, "diff")) if err != nil { return err } @@ -975,9 +1012,14 @@ func (d *Driver) create(id, parent string, opts *graphdriver.CreateOpts, disable } } - if err := idtools.MkdirAllAndChownNew(dir, 0700, idPair); err != nil { + if err := idtools.MkdirAllAndChownNew(dir, 0o700, idPair); err != nil { return err } + if imageStore != "" { + if err := idtools.MkdirAllAndChownNew(imageStore, 0o700, idPair); err != nil { 
+ return err + } + } defer func() { // Clean up on failure @@ -985,6 +1027,11 @@ func (d *Driver) create(id, parent string, opts *graphdriver.CreateOpts, disable if err2 := os.RemoveAll(dir); err2 != nil { logrus.Errorf("While recovering from a failure creating a layer, error deleting %#v: %v", dir, err2) } + if imageStore != "" { + if err2 := os.RemoveAll(workDirBase); err2 != nil { + logrus.Errorf("While recovering from a failure creating a layer, error deleting %#v: %v", workDirBase, err2) + } + } } }() @@ -1007,44 +1054,60 @@ func (d *Driver) create(id, parent string, opts *graphdriver.CreateOpts, disable if err := d.quotaCtl.SetQuota(dir, quota); err != nil { return err } + if imageStore != "" { + if err := d.quotaCtl.SetQuota(imageStore, quota); err != nil { + return err + } + } } perms := defaultPerms if d.options.forceMask != nil { perms = *d.options.forceMask } + if parent != "" { - st, err := system.Stat(filepath.Join(d.dir(parent), "diff")) + parentDir, parentImageStore, _ := d.dir2(parent) + base := parentDir + if parentImageStore != "" { + base = parentImageStore + } + st, err := system.Stat(filepath.Join(base, "diff")) if err != nil { return err } perms = os.FileMode(st.Mode()) } - if err := idtools.MkdirAs(path.Join(dir, "diff"), perms, rootUID, rootGID); err != nil { + if err := idtools.MkdirAs(path.Join(workDirBase, "diff"), perms, rootUID, rootGID); err != nil { return err } lid := generateID(idLength) - if err := os.Symlink(path.Join("..", id, "diff"), path.Join(d.home, linkDir, lid)); err != nil { + + linkBase := path.Join("..", id, "diff") + if imageStore != "" { + linkBase = path.Join(imageStore, "diff") + } + if err := os.Symlink(linkBase, path.Join(d.home, linkDir, lid)); err != nil { return err } // Write link id to link file - if err := os.WriteFile(path.Join(dir, "link"), []byte(lid), 0644); err != nil { + if err := os.WriteFile(path.Join(dir, "link"), []byte(lid), 0o644); err != nil { return err } - if err := idtools.MkdirAs(path.Join(dir, "work"), 0700, rootUID, rootGID); err != nil { + if err := idtools.MkdirAs(path.Join(workDirBase, "work"), 0o700, rootUID, rootGID); err != nil { return err } - if err := idtools.MkdirAs(path.Join(dir, "merged"), 0700, rootUID, rootGID); err != nil { + if err := idtools.MkdirAs(path.Join(dir, "merged"), 0o700, rootUID, rootGID); err != nil { return err } // if no parent directory, create a dummy lower directory and skip writing a "lowers" file if parent == "" { - return idtools.MkdirAs(path.Join(dir, "empty"), 0700, rootUID, rootGID) + return idtools.MkdirAs(path.Join(dir, "empty"), 0o700, rootUID, rootGID) } lower, err := d.getLower(parent) @@ -1052,7 +1115,7 @@ func (d *Driver) create(id, parent string, opts *graphdriver.CreateOpts, disable return err } if lower != "" { - if err := os.WriteFile(path.Join(dir, lowerFile), []byte(lower), 0666); err != nil { + if err := os.WriteFile(path.Join(dir, lowerFile), []byte(lower), 0o666); err != nil { return err } } @@ -1120,22 +1183,26 @@ func (d *Driver) getLower(parent string) (string, error) { } func (d *Driver) dir(id string) string { - p, _ := d.dir2(id) + p, _, _ := d.dir2(id) return p } -func (d *Driver) dir2(id string) (string, bool) { +func (d *Driver) dir2(id string) (string, string, bool) { newpath := path.Join(d.home, id) + imageStore := "" + if d.imageStore != "" { + imageStore = path.Join(d.imageStore, id) + } if _, err := os.Stat(newpath); err != nil { for _, p := range d.AdditionalImageStores() { l := path.Join(p, d.name, id) _, err = os.Stat(l) if err == nil { - 
return l, true + return l, imageStore, true } } } - return newpath, false + return newpath, imageStore, false } func (d *Driver) getLowerDirs(id string) ([]string, error) { @@ -1223,6 +1290,9 @@ func (d *Driver) Remove(id string) error { } if d.quotaCtl != nil { d.quotaCtl.ClearQuota(dir) + if d.imageStore != "" { + d.quotaCtl.ClearQuota(d.imageStore) + } } return nil } @@ -1240,7 +1310,7 @@ func (d *Driver) recreateSymlinks() error { return fmt.Errorf("reading driver home directory %q: %w", d.home, err) } // This makes the link directory if it doesn't exist - if err := idtools.MkdirAllAs(path.Join(d.home, linkDir), 0755, 0, 0); err != nil { + if err := idtools.MkdirAllAs(path.Join(d.home, linkDir), 0o755, 0, 0); err != nil { return err } // Keep looping as long as we take some corrective action in each iteration @@ -1317,7 +1387,7 @@ func (d *Driver) recreateSymlinks() error { if err != nil || string(data) != link.Name() { // NOTE: If two or more links point to the same target, we will update linkFile // with every value of link.Name(), and set madeProgress = true every time. - if err := os.WriteFile(linkFile, []byte(link.Name()), 0644); err != nil { + if err := os.WriteFile(linkFile, []byte(link.Name()), 0o644); err != nil { errs = multierror.Append(errs, fmt.Errorf("correcting link for layer %s: %w", targetID, err)) continue } @@ -1342,10 +1412,14 @@ func (d *Driver) Get(id string, options graphdriver.MountOpts) (string, error) { } func (d *Driver) get(id string, disableShifting bool, options graphdriver.MountOpts) (_ string, retErr error) { - dir, inAdditionalStore := d.dir2(id) + dir, imageStore, inAdditionalStore := d.dir2(id) if _, err := os.Stat(dir); err != nil { return "", err } + workDirBase := dir + if imageStore != "" { + workDirBase = imageStore + } readWrite := !inAdditionalStore if !d.SupportsShifting() || options.DisableShifting { @@ -1478,18 +1552,18 @@ func (d *Driver) get(id string, disableShifting bool, options graphdriver.MountO absLowers = append(absLowers, path.Join(dir, "empty")) } // user namespace requires this to move a directory from lower to upper. 
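With the new imageStore plumbing, dir2 now returns three values, (homePath, imageStorePath, inAdditionalStore), and every caller that needs the writable diff and work directories prefers the image-store path when one is configured. A sketch of that selection with hypothetical names (workBase is not a function in the vendored code):

package main

import (
	"fmt"
	"path/filepath"
)

// workBase mirrors what the patched Metadata/create/get callers do with
// dir2's results: layer bookkeeping stays under the home dir, but the
// writable "diff" and "work" directories move to the per-id image store.
func workBase(dir, imageStore string) string {
	if imageStore != "" {
		return imageStore
	}
	return dir
}

func main() {
	fmt.Println(filepath.Join(workBase("/graph/overlay/abc", ""), "diff"))                     // /graph/overlay/abc/diff
	fmt.Println(filepath.Join(workBase("/graph/overlay/abc", "/imgs/overlay/abc"), "diff"))    // /imgs/overlay/abc/diff
}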
- rootUID, rootGID, err := idtools.GetRootUIDGID(d.uidMaps, d.gidMaps) + rootUID, rootGID, err := idtools.GetRootUIDGID(options.UidMaps, options.GidMaps) if err != nil { return "", err } - diffDir := path.Join(dir, "diff") + diffDir := path.Join(workDirBase, "diff") if err := idtools.MkdirAllAs(diffDir, perms, rootUID, rootGID); err != nil { return "", err } mergedDir := path.Join(dir, "merged") // Create the driver merged dir - if err := idtools.MkdirAs(mergedDir, 0700, rootUID, rootGID); err != nil && !os.IsExist(err) { + if err := idtools.MkdirAs(mergedDir, 0o700, rootUID, rootGID); err != nil && !os.IsExist(err) { return "", err } if count := d.ctr.Increment(mergedDir); count > 1 { @@ -1505,7 +1579,7 @@ func (d *Driver) get(id string, disableShifting bool, options graphdriver.MountO } }() - workdir := path.Join(dir, "work") + workdir := path.Join(workDirBase, "work") if d.options.mountProgram == "" && unshare.IsRootless() { optsList = append(optsList, "userxattr") @@ -1525,7 +1599,7 @@ func (d *Driver) get(id string, disableShifting bool, options graphdriver.MountO if !disableShifting && len(options.UidMaps) > 0 && len(options.GidMaps) > 0 && d.options.mountProgram == "" { var newAbsDir []string mappedRoot := filepath.Join(d.home, id, "mapped") - if err := os.MkdirAll(mappedRoot, 0700); err != nil { + if err := os.MkdirAll(mappedRoot, 0o700); err != nil { return "", err } @@ -1612,16 +1686,15 @@ func (d *Driver) get(id string, disableShifting bool, options graphdriver.MountO // Use mountFrom when the mount data has exceeded the page size. The mount syscall fails if // the mount data cannot fit within a page and relative links make the mount data much // smaller at the expense of requiring a fork exec to chdir(). - - workdir = path.Join(id, "work") if readWrite { diffDir := path.Join(id, "diff") - opts = fmt.Sprintf("lowerdir=%s,upperdir=%s,workdir=%s", strings.Join(absLowers, ":"), diffDir, workdir) + workDir := path.Join(id, "work") + opts = fmt.Sprintf("lowerdir=%s,upperdir=%s,workdir=%s", strings.Join(absLowers, ":"), diffDir, workDir) } else { opts = fmt.Sprintf("lowerdir=%s:%s", diffDir, strings.Join(absLowers, ":")) } if len(optsList) > 0 { - opts = fmt.Sprintf("%s,%s", opts, strings.Join(optsList, ",")) + opts = strings.Join(append([]string{opts}, optsList...), ",") } mountData = label.FormatMountLabel(opts, options.MountLabel) mountFunc = func(source string, target string, mType string, flags uintptr, label string) error { @@ -1631,9 +1704,9 @@ func (d *Driver) get(id string, disableShifting bool, options graphdriver.MountO } // overlay has a check in place to prevent mounting the same file system twice - // if volatile was already specified. - err = os.RemoveAll(filepath.Join(workdir, "work/incompat/volatile")) - if err != nil && !os.IsNotExist(err) { + // if volatile was already specified. Yes, the kernel repeats the "work" component. 
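Two easy-to-miss changes in get() above: rootless mounts without a mount program now pass userxattr (so overlay metadata is stored in user.* rather than trusted.* xattrs, which an unprivileged user namespace can actually write), and the extra options are appended with a single strings.Join instead of repeated Sprintf calls. A condensed, hypothetical sketch of the option assembly:

package main

import (
	"fmt"
	"strings"
)

// overlayOpts assembles overlay mount data the way the patched get() does:
// lowerdir/upperdir/workdir first, then any extra options joined by commas.
func overlayOpts(lowers []string, diffDir, workDir string, rootless bool) string {
	opts := fmt.Sprintf("lowerdir=%s,upperdir=%s,workdir=%s",
		strings.Join(lowers, ":"), diffDir, workDir)
	var extra []string
	if rootless {
		// the kernel requires userxattr in a user namespace so overlay
		// metadata lands in user.* xattrs instead of trusted.*
		extra = append(extra, "userxattr")
	}
	if len(extra) > 0 {
		opts = strings.Join(append([]string{opts}, extra...), ",")
	}
	return opts
}

func main() {
	fmt.Println(overlayOpts([]string{"l/a", "l/b"}, "diff", "work", true))
}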
+ err = os.RemoveAll(filepath.Join(workdir, "work", "incompat", "volatile")) + if err != nil && !errors.Is(err, os.ErrNotExist) { return "", err } @@ -1703,11 +1776,13 @@ func (d *Driver) Put(id string) error { if !unmounted { if err := unix.Unmount(mountpoint, unix.MNT_DETACH); err != nil && !os.IsNotExist(err) { logrus.Debugf("Failed to unmount %s overlay: %s - %v", id, mountpoint, err) + return fmt.Errorf("unmounting %q: %w", mountpoint, err) } } if err := unix.Rmdir(mountpoint); err != nil && !os.IsNotExist(err) { logrus.Debugf("Failed to remove mountpoint %s overlay: %s - %v", id, mountpoint, err) + return fmt.Errorf("removing mount point %q: %w", mountpoint, err) } return nil @@ -1725,20 +1800,23 @@ func (d *Driver) ListLayers() ([]string, error) { if err != nil { return nil, err } - layers := make([]string, 0) for _, entry := range entries { id := entry.Name() - // Does it look like a datadir directory? - if !entry.IsDir() || stringid.ValidateID(id) != nil { + switch id { + case linkDir, stagingDir, quota.BackingFsBlockDeviceLink, mountProgramFlagFile: + // expected, but not a layer. skip it continue + default: + // Does it look like a datadir directory? + if !entry.IsDir() { + continue + } + layers = append(layers, id) } - - layers = append(layers, id) } - - return layers, err + return layers, nil } // isParent returns if the passed in parent is the direct parent of the passed in layer @@ -1795,7 +1873,7 @@ func (g *overlayFileGetter) Close() error { } func (d *Driver) getStagingDir() string { - return filepath.Join(d.home, "staging") + return filepath.Join(d.home, stagingDir) } // DiffGetter returns a FileGetCloser that can read files from the directory that @@ -1831,7 +1909,7 @@ func (d *Driver) ApplyDiffWithDiffer(id, parent string, options *graphdriver.App var applyDir string if id == "" { - err := os.MkdirAll(d.getStagingDir(), 0700) + err := os.MkdirAll(d.getStagingDir(), 0o700) if err != nil && !os.IsExist(err) { return graphdriver.DriverWithDifferOutput{}, err } @@ -1874,6 +1952,9 @@ func (d *Driver) ApplyDiffFromStagingDirectory(id, parent, stagingDirectory stri if err := os.RemoveAll(diff); err != nil && !os.IsNotExist(err) { return err } + + diffOutput.UncompressedDigest = diffOutput.TOCDigest + return os.Rename(stagingDirectory, diff) } @@ -1884,7 +1965,6 @@ func (d *Driver) DifferTarget(id string) (string, error) { // ApplyDiff applies the new layer into a root func (d *Driver) ApplyDiff(id, parent string, options graphdriver.ApplyDiffOpts) (size int64, err error) { - if !d.isParent(id, parent) { if d.options.ignoreChownErrors { options.IgnoreChownErrors = d.options.ignoreChownErrors @@ -1922,8 +2002,12 @@ func (d *Driver) ApplyDiff(id, parent string, options graphdriver.ApplyDiffOpts) } func (d *Driver) getDiffPath(id string) (string, error) { - dir := d.dir(id) - return redirectDiffIfAdditionalLayer(path.Join(dir, "diff")) + dir, imagestore, _ := d.dir2(id) + base := dir + if imagestore != "" { + base = imagestore + } + return redirectDiffIfAdditionalLayer(path.Join(base, "diff")) } func (d *Driver) getLowerDiffPaths(id string) ([]string, error) { @@ -2014,8 +2098,12 @@ func (d *Driver) AdditionalImageStores() []string { // by toContainer to those specified by toHost. 
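The rewritten overlay ListLayers above stops validating entry names with stringid and instead skips the bookkeeping entries that share the driver home: the "l" link directory, the staging directory, the quota package's backing-device node, and the mount-program flag file; every other directory is treated as a layer. The filter in isolation, with the constants' values written out:

package main

import "fmt"

// looksLikeLayer reproduces the new skip list: known non-layer entries and
// plain files are rejected, any other directory is assumed to be a layer id.
func looksLikeLayer(name string, isDir bool) bool {
	switch name {
	case "l", "staging", "backingFsBlockDev", ".has-mount-program":
		return false // bookkeeping entries that live next to layer dirs
	}
	return isDir
}

func main() {
	fmt.Println(looksLikeLayer("l", true))        // false
	fmt.Println(looksLikeLayer("0f3a9c", true))   // true
}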
func (d *Driver) UpdateLayerIDMap(id string, toContainer, toHost *idtools.IDMappings, mountLabel string) error { var err error - dir := d.dir(id) - diffDir := filepath.Join(dir, "diff") + dir, imagestore, _ := d.dir2(id) + base := dir + if imagestore != "" { + base = imagestore + } + diffDir := filepath.Join(base, "diff") rootUID, rootGID := 0, 0 if toHost != nil { @@ -2196,7 +2284,7 @@ func (al *additionalLayer) CreateAs(id, parent string) error { } // tell the additional layer store that we use this layer. // mark this layer as "additional layer" - if err := os.WriteFile(path.Join(dir, "additionallayer"), []byte(al.path), 0644); err != nil { + if err := os.WriteFile(path.Join(dir, "additionallayer"), []byte(al.path), 0o644); err != nil { return err } notifyUseAdditionalLayer(al.path) diff --git a/vendor/github.com/containers/storage/drivers/quota/projectquota.go b/vendor/github.com/containers/storage/drivers/quota/projectquota.go index f5484dee7..10ea3c5a5 100644 --- a/vendor/github.com/containers/storage/drivers/quota/projectquota.go +++ b/vendor/github.com/containers/storage/drivers/quota/projectquota.go @@ -50,6 +50,7 @@ struct fsxattr { #endif */ import "C" + import ( "errors" "fmt" @@ -67,6 +68,10 @@ import ( const projectIDsAllocatedPerQuotaHome = 10000 +// BackingFsBlockDeviceLink is the name of a file that we place in +// the home directory of a driver that uses this package. +const BackingFsBlockDeviceLink = "backingFsBlockDev" + // Quota limit params - currently we only control blocks hard limit and inodes type Quota struct { Size uint64 @@ -94,7 +99,6 @@ func generateUniqueProjectID(path string) (uint32, error) { stat, ok := fileinfo.Sys().(*syscall.Stat_t) if !ok { return 0, fmt.Errorf("not a syscall.Stat_t %s", path) - } projectID := projectIDsAllocatedPerQuotaHome + (stat.Ino*projectIDsAllocatedPerQuotaHome)%(math.MaxUint32-projectIDsAllocatedPerQuotaHome) return uint32(projectID), nil @@ -187,7 +191,6 @@ func NewControl(basePath string) (*Control, error) { // SetQuota - assign a unique project id to directory and set the quota limits // for that project id func (q *Control) SetQuota(targetPath string, quota Quota) error { - projectID, ok := q.quotas[targetPath] if !ok { projectID = q.nextProjectID @@ -235,7 +238,7 @@ func (q *Control) setProjectQuota(projectID uint32, quota Quota) error { d.d_ino_softlimit = d.d_ino_hardlimit } - var cs = C.CString(q.backingFsBlockDev) + cs := C.CString(q.backingFsBlockDev) defer C.free(unsafe.Pointer(cs)) runQuotactl := func() syscall.Errno { @@ -303,7 +306,7 @@ func (q *Control) fsDiskQuotaFromPath(targetPath string) (C.fs_disk_quota_t, err // // get the quota limit for the container's project id // - var cs = C.CString(q.backingFsBlockDev) + cs := C.CString(q.backingFsBlockDev) defer C.free(unsafe.Pointer(cs)) _, _, errno := unix.Syscall6(unix.SYS_QUOTACTL, C.Q_XGETPQUOTA, @@ -395,9 +398,9 @@ func openDir(path string) (*C.DIR, error) { Cpath := C.CString(path) defer free(Cpath) - dir := C.opendir(Cpath) + dir, errno := C.opendir(Cpath) if dir == nil { - return nil, fmt.Errorf("can't open dir %v", Cpath) + return nil, fmt.Errorf("can't open dir %v: %w", Cpath, errno) } return dir, nil } @@ -421,11 +424,18 @@ func makeBackingFsDev(home string) (string, error) { return "", err } - backingFsBlockDev := path.Join(home, "backingFsBlockDev") + backingFsBlockDev := path.Join(home, BackingFsBlockDeviceLink) backingFsBlockDevTmp := backingFsBlockDev + ".tmp" // Re-create just in case someone copied the home directory over to a new device - if err 
:= unix.Mknod(backingFsBlockDevTmp, unix.S_IFBLK|0600, int(stat.Dev)); err != nil { - return "", fmt.Errorf("failed to mknod %s: %w", backingFsBlockDevTmp, err) + if err := unix.Mknod(backingFsBlockDevTmp, unix.S_IFBLK|0o600, int(stat.Dev)); err != nil { + if !errors.Is(err, unix.EEXIST) { + return "", fmt.Errorf("failed to mknod %s: %w", backingFsBlockDevTmp, err) + } + // On EEXIST, try again after unlinking any potential leftover. + _ = unix.Unlink(backingFsBlockDevTmp) + if err := unix.Mknod(backingFsBlockDevTmp, unix.S_IFBLK|0o600, int(stat.Dev)); err != nil { + return "", fmt.Errorf("failed to mknod %s: %w", backingFsBlockDevTmp, err) + } } if err := unix.Rename(backingFsBlockDevTmp, backingFsBlockDev); err != nil { return "", fmt.Errorf("failed to rename %s to %s: %w", backingFsBlockDevTmp, backingFsBlockDev, err) diff --git a/vendor/github.com/containers/storage/drivers/quota/projectquota_unsupported.go b/vendor/github.com/containers/storage/drivers/quota/projectquota_unsupported.go index 2f6c7f28f..648fd3379 100644 --- a/vendor/github.com/containers/storage/drivers/quota/projectquota_unsupported.go +++ b/vendor/github.com/containers/storage/drivers/quota/projectquota_unsupported.go @@ -15,8 +15,7 @@ type Quota struct { // Control - Context to be used by storage driver (e.g. overlay) // who wants to apply project quotas to container dirs -type Control struct { -} +type Control struct{} func NewControl(basePath string) (*Control, error) { return nil, errors.New("filesystem does not support, or has not enabled quotas") diff --git a/vendor/github.com/containers/storage/drivers/template.go b/vendor/github.com/containers/storage/drivers/template.go index 7b96c082d..66ab89f7f 100644 --- a/vendor/github.com/containers/storage/drivers/template.go +++ b/vendor/github.com/containers/storage/drivers/template.go @@ -34,6 +34,7 @@ func NaiveCreateFromTemplate(d TemplateDriver, id, template string, templateIDMa } return err } + defer diff.Close() applyOptions := ApplyDiffOpts{ Diff: diff, diff --git a/vendor/github.com/containers/storage/drivers/vfs/driver.go b/vendor/github.com/containers/storage/drivers/vfs/driver.go index bf0cfe940..599cf095d 100644 --- a/vendor/github.com/containers/storage/drivers/vfs/driver.go +++ b/vendor/github.com/containers/storage/drivers/vfs/driver.go @@ -14,19 +14,13 @@ import ( "github.com/containers/storage/pkg/directory" "github.com/containers/storage/pkg/idtools" "github.com/containers/storage/pkg/parsers" - "github.com/containers/storage/pkg/stringid" "github.com/containers/storage/pkg/system" "github.com/opencontainers/selinux/go-selinux/label" "github.com/sirupsen/logrus" "github.com/vbatts/tar-split/tar/storage" ) -var ( - // CopyDir defines the copy method to use. 
- CopyDir = dirCopy ) - -const defaultPerms = os.FileMode(0555) +const defaultPerms = os.FileMode(0o555) func init() { graphdriver.MustRegister("vfs", Init) } @@ -42,11 +36,10 @@ func Init(home string, options graphdriver.Options) (graphdriver.Driver, error) } rootIDs := d.idMappings.RootPair() - if err := idtools.MkdirAllAndChown(home, 0700, rootIDs); err != nil { + if err := idtools.MkdirAllAndChown(filepath.Join(home, "dir"), 0o700, rootIDs); err != nil { return nil, err } for _, option := range options.DriverOptions { - key, val, err := parsers.ParseKeyValueOpt(option) if err != nil { return nil, err @@ -69,6 +62,12 @@ func Init(home string, options graphdriver.Options) (graphdriver.Driver, error) return nil, fmt.Errorf("vfs driver does not support %s options", key) } } + // If --imagestore is provided, let's add writable graphRoot + // to vfs's additional image store, as it is done for + // `overlay` driver. + if options.ImageStore != "" { + d.homes = append(d.homes, options.ImageStore) + } d.updater = graphdriver.NewNaiveLayerIDMapUpdater(d) d.naiveDiff = graphdriver.NewNaiveDiffDriver(d, d.updater) @@ -161,7 +160,7 @@ func (d *Driver) create(id, parent string, opts *graphdriver.CreateOpts, ro bool dir := d.dir(id) rootIDs := idMappings.RootPair() - if err := idtools.MkdirAllAndChown(filepath.Dir(dir), 0700, rootIDs); err != nil { + if err := idtools.MkdirAllAndChown(filepath.Dir(dir), 0o700, rootIDs); err != nil { return err } @@ -173,7 +172,7 @@ func (d *Driver) create(id, parent string, opts *graphdriver.CreateOpts, ro bool rootPerms := defaultPerms if runtime.GOOS == "darwin" { - rootPerms = os.FileMode(0700) + rootPerms = os.FileMode(0o700) } if parent != "" { @@ -203,7 +202,6 @@ func (d *Driver) create(id, parent string, opts *graphdriver.CreateOpts, ro bool } return nil - } func (d *Driver) dir(id string) string { @@ -228,15 +226,12 @@ func (d *Driver) Remove(id string) error { // Get returns the directory for the given id. func (d *Driver) Get(id string, options graphdriver.MountOpts) (_ string, retErr error) { dir := d.dir(id) - switch len(options.Options) { - case 0: - case 1: - if options.Options[0] == "ro" { + + for _, opt := range options.Options { + if opt == "ro" { // ignore "ro" option - break + continue } - fallthrough - default: return "", fmt.Errorf("vfs driver does not support mount options") } if st, err := os.Stat(dir); err != nil { @@ -268,7 +263,7 @@ func (d *Driver) Exists(id string) bool { // List layers (not including additional image stores) func (d *Driver) ListLayers() ([]string, error) { - entries, err := os.ReadDir(d.homes[0]) + entries, err := os.ReadDir(filepath.Join(d.homes[0], "dir")) if err != nil { return nil, err } @@ -278,7 +273,7 @@ func (d *Driver) ListLayers() ([]string, error) { for _, entry := range entries { id := entry.Name() // Does it look like a datadir directory? - if !entry.IsDir() || stringid.ValidateID(id) != nil { + if !entry.IsDir() { continue } @@ -304,7 +299,15 @@ func (d *Driver) SupportsShifting() bool { // UpdateLayerIDMap updates ID mappings in a from matching the ones specified // by toContainer to those specified by toHost.
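vfs's Get above replaces the old switch-on-length with a simple loop: every "ro" entry is ignored (presumably so the read-only mounts that NaiveDiffDriver.Diff now requests, see the fsdiff.go hunk earlier, keep working against vfs) and any other option is still rejected. The same shape, standalone:

package main

import "fmt"

// checkMountOpts mirrors the new loop: "ro" is tolerated and ignored in any
// position and any count; everything else remains unsupported.
func checkMountOpts(opts []string) error {
	for _, opt := range opts {
		if opt == "ro" {
			continue
		}
		return fmt.Errorf("vfs driver does not support mount options")
	}
	return nil
}

func main() {
	fmt.Println(checkMountOpts([]string{"ro"}))      // <nil>
	fmt.Println(checkMountOpts([]string{"noatime"})) // error
}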
func (d *Driver) UpdateLayerIDMap(id string, toContainer, toHost *idtools.IDMappings, mountLabel string) error { - return d.updater.UpdateLayerIDMap(id, toContainer, toHost, mountLabel) + if err := d.updater.UpdateLayerIDMap(id, toContainer, toHost, mountLabel); err != nil { + return err + } + dir := d.dir(id) + rootIDs, err := toHost.ToHost(idtools.IDPair{UID: 0, GID: 0}) + if err != nil { + return err + } + return os.Chown(dir, rootIDs.UID, rootIDs.GID) } // Changes produces a list of changes between the specified layer diff --git a/vendor/github.com/containers/storage/drivers/windows/windows.go b/vendor/github.com/containers/storage/drivers/windows/windows.go index 66aa460cf..8c2dc18ae 100644 --- a/vendor/github.com/containers/storage/drivers/windows/windows.go +++ b/vendor/github.com/containers/storage/drivers/windows/windows.go @@ -64,8 +64,7 @@ func init() { } } -type checker struct { -} +type checker struct{} func (c *checker) IsMounted(path string) bool { return false @@ -102,7 +101,7 @@ func InitFilter(home string, options graphdriver.Options) (graphdriver.Driver, e return nil, fmt.Errorf("%s is on an ReFS volume - ReFS volumes are not supported", home) } - if err := idtools.MkdirAllAs(home, 0700, 0, 0); err != nil { + if err := idtools.MkdirAllAs(home, 0o700, 0, 0); err != nil { return nil, fmt.Errorf("windowsfilter failed to create '%s': %w", home, err) } @@ -885,7 +884,7 @@ func (d *Driver) resolveID(id string) (string, error) { // setID stores the layerId in disk. func (d *Driver) setID(id, altID string) error { - return os.WriteFile(filepath.Join(d.dir(id), "layerId"), []byte(altID), 0600) + return os.WriteFile(filepath.Join(d.dir(id), "layerId"), []byte(altID), 0o600) } // getLayerChain returns the layer chain information. @@ -915,7 +914,7 @@ func (d *Driver) setLayerChain(id string, chain []string) error { } jPath := filepath.Join(d.dir(id), "layerchain.json") - err = os.WriteFile(jPath, content, 0600) + err = os.WriteFile(jPath, content, 0o600) if err != nil { return fmt.Errorf("unable to write layerchain file - %s", err) } diff --git a/vendor/github.com/containers/storage/drivers/zfs/zfs.go b/vendor/github.com/containers/storage/drivers/zfs/zfs.go index aeef64103..e02289784 100644 --- a/vendor/github.com/containers/storage/drivers/zfs/zfs.go +++ b/vendor/github.com/containers/storage/drivers/zfs/zfs.go @@ -30,7 +30,7 @@ type zfsOptions struct { mountOptions string } -const defaultPerms = os.FileMode(0555) +const defaultPerms = os.FileMode(0o555) func init() { graphdriver.MustRegister("zfs", Init) @@ -57,7 +57,7 @@ func Init(base string, opt graphdriver.Options) (graphdriver.Driver, error) { return nil, fmt.Errorf("the 'zfs' command is not available: %w", graphdriver.ErrPrerequisites) } - file, err := unix.Open("/dev/zfs", unix.O_RDWR, 0600) + file, err := unix.Open("/dev/zfs", unix.O_RDWR, 0o600) if err != nil { logger.Debugf("cannot open /dev/zfs: %v", err) return nil, fmt.Errorf("could not open /dev/zfs: %v: %w", err, graphdriver.ErrPrerequisites) @@ -110,7 +110,7 @@ func Init(base string, opt graphdriver.Options) (graphdriver.Driver, error) { if err != nil { return nil, fmt.Errorf("failed to get root uid/gid: %w", err) } - if err := idtools.MkdirAllAs(base, 0700, rootUID, rootGID); err != nil { + if err := idtools.MkdirAllAs(base, 0o700, rootUID, rootGID); err != nil { return nil, fmt.Errorf("failed to create '%s': %w", base, err) } @@ -409,7 +409,6 @@ func (d *Driver) Remove(id string) error { // Get returns the mountpoint for the given id after creating the target 
directories if necessary. func (d *Driver) Get(id string, options graphdriver.MountOpts) (_ string, retErr error) { - mountpoint := d.mountPath(id) if count := d.ctr.Increment(mountpoint); count > 1 { return mountpoint, nil @@ -454,7 +453,7 @@ func (d *Driver) Get(id string, options graphdriver.MountOpts) (_ string, retErr return "", err } // Create the target directories if they don't exist - if err := idtools.MkdirAllAs(mountpoint, 0755, rootUID, rootGID); err != nil { + if err := idtools.MkdirAllAs(mountpoint, 0o755, rootUID, rootGID); err != nil { return "", err } @@ -506,7 +505,9 @@ func (d *Driver) Exists(id string) bool { return d.filesystemsCache[d.zfsPath(id)] } -// List layers (not including additional image stores) +// List layers (not including additional image stores). Our layers aren't all +// dependent on a single well-known dataset, so we can't reliably tell which +// datasets are ours and which ones just look like they could be ours. func (d *Driver) ListLayers() ([]string, error) { return nil, graphdriver.ErrNotSupported } diff --git a/vendor/github.com/containers/storage/images.go b/vendor/github.com/containers/storage/images.go index 9dd196e8b..d71eab08b 100644 --- a/vendor/github.com/containers/storage/images.go +++ b/vendor/github.com/containers/storage/images.go @@ -568,26 +568,28 @@ func (r *imageStore) Save() error { } r.lockfile.AssertLockedForWriting() rpath := r.imagespath() - if err := os.MkdirAll(filepath.Dir(rpath), 0700); err != nil { + if err := os.MkdirAll(filepath.Dir(rpath), 0o700); err != nil { return err } jdata, err := json.Marshal(&r.images) if err != nil { return err } - if err := ioutils.AtomicWriteFile(rpath, jdata, 0600); err != nil { - return err - } + // This must be done before we write the file, because the process could be terminated + // after the file is written but before the lock file is updated. lw, err := r.lockfile.RecordWrite() if err != nil { return err } r.lastWrite = lw + if err := ioutils.AtomicWriteFile(rpath, jdata, 0o600); err != nil { + return err + } return nil } func newImageStore(dir string) (rwImageStore, error) { - if err := os.MkdirAll(dir, 0700); err != nil { + if err := os.MkdirAll(dir, 0o700); err != nil { return nil, err } lockfile, err := lockfile.GetLockFile(filepath.Join(dir, "images.lock")) @@ -1015,11 +1017,11 @@ func (r *imageStore) setBigData(image *Image, key string, data []byte, newDigest if key == "" { return fmt.Errorf("can't set empty name for image big data item: %w", ErrInvalidBigDataName) } - err := os.MkdirAll(r.datadir(image.ID), 0700) + err := os.MkdirAll(r.datadir(image.ID), 0o700) if err != nil { return err } - err = ioutils.AtomicWriteFile(r.datapath(image.ID, key), data, 0600) + err = ioutils.AtomicWriteFile(r.datapath(image.ID, key), data, 0o600) if err == nil { save := false if image.BigDataSizes == nil { diff --git a/vendor/github.com/containers/storage/layers.go b/vendor/github.com/containers/storage/layers.go index 3f37405b0..e5835757f 100644 --- a/vendor/github.com/containers/storage/layers.go +++ b/vendor/github.com/containers/storage/layers.go @@ -314,9 +314,6 @@ type rwLayerStore interface { // Clean up unreferenced layers GarbageCollect() error - - // supportsShifting() returns true if the driver.Driver.SupportsShifting(). - supportsShifting() bool } type layerStore struct { @@ -657,7 +654,6 @@ func (r *layerStore) Layers() ([]Layer, error) { // Requires startWriting. 
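imageStore.Save above, and saveLayers/saveMounts below, flip the order of the two persistence steps: the lock file's write counter is recorded before the JSON is written. If the process dies between the steps, a bumped counter over old data merely makes other processes reload; the previous order could leave new data under an old counter, letting readers keep stale in-memory state. A toy model of the invariant with hypothetical types (the real RecordWrite returns a lockfile.LastWrite, not an int):

package main

import "fmt"

type lockFile struct{ counter int }

func (l *lockFile) RecordWrite() (int, error) { l.counter++; return l.counter, nil }

type store struct {
	lock      lockFile
	lastWrite int
	onDisk    string
}

// save follows the patched ordering: advance the counter first, then write.
// A crash between the two steps leaves counter ahead of the data, which
// readers treat as "reload", never the unsafe opposite.
func (s *store) save(data string) error {
	lw, err := s.lock.RecordWrite()
	if err != nil {
		return err
	}
	s.lastWrite = lw
	s.onDisk = data // stands in for ioutils.AtomicWriteFile
	return nil
}

func main() {
	var s store
	_ = s.save(`{"images":[]}`)
	fmt.Println(s.lastWrite, s.onDisk)
}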
func (r *layerStore) GarbageCollect() error { layers, err := r.driver.ListLayers() - if err != nil { if errors.Is(err, drivers.ErrNotSupported) { return nil @@ -864,33 +860,35 @@ func (r *layerStore) loadMounts() error { return err } layerMounts := []layerMountPoint{} - if err = json.Unmarshal(data, &layerMounts); len(data) == 0 || err == nil { - // Clear all of our mount information. If another process - // unmounted something, it (along with its zero count) won't - // have been encoded into the version of mountpoints.json that - // we're loading, so our count could fall out of sync with it - // if we don't, and if we subsequently change something else, - // we'd pass that error along to other process that reloaded - // the data after we saved it. - for _, layer := range r.layers { - layer.MountPoint = "" - layer.MountCount = 0 - } - // All of the non-zero count values will have been encoded, so - // we reset the still-mounted ones based on the contents. - for _, mount := range layerMounts { - if mount.MountPoint != "" { - if layer, ok := r.lookup(mount.ID); ok { - mounts[mount.MountPoint] = layer - layer.MountPoint = mount.MountPoint - layer.MountCount = mount.MountCount - } + if len(data) != 0 { + if err := json.Unmarshal(data, &layerMounts); err != nil { + return err + } + } + // Clear all of our mount information. If another process + // unmounted something, it (along with its zero count) won't + // have been encoded into the version of mountpoints.json that + // we're loading, so our count could fall out of sync with it + // if we don't, and if we subsequently change something else, + // we'd pass that error along to other processes that reloaded + // the data after we saved it. + for _, layer := range r.layers { + layer.MountPoint = "" + layer.MountCount = 0 + } + // All of the non-zero count values will have been encoded, so + // we reset the still-mounted ones based on the contents. + for _, mount := range layerMounts { + if mount.MountPoint != "" { + if layer, ok := r.lookup(mount.ID); ok { + mounts[mount.MountPoint] = layer + layer.MountPoint = mount.MountPoint + layer.MountCount = mount.MountCount } } - err = nil } r.bymount = mounts - return err + return nil } // save saves the contents of the store to disk. @@ -920,13 +918,21 @@ func (r *layerStore) saveLayers(saveLocations layerLocations) error { } r.lockfile.AssertLockedForWriting() + // This must be done before we write the file, because the process could be terminated + // after the file is written but before the lock file is updated.
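The ordering insisted on above ("record the write, then write the file") is easy to get backwards. A minimal, runnable sketch of the invariant, using hypothetical bumpLockGeneration and atomicWrite helpers in place of this package's lockfile.RecordWrite and ioutils.AtomicWriteFile:

    package main

    import (
        "fmt"
        "os"
        "path/filepath"
    )

    // bumpLockGeneration stands in for lockfile.RecordWrite: it advertises
    // "the on-disk state changed" to other processes before the change exists.
    func bumpLockGeneration(dir string) error {
        f, err := os.OpenFile(filepath.Join(dir, "gen"), os.O_CREATE|os.O_APPEND|os.O_WRONLY, 0o600)
        if err != nil {
            return err
        }
        defer f.Close()
        _, err = f.Write([]byte{1})
        return err
    }

    // atomicWrite stands in for ioutils.AtomicWriteFile: write a temporary
    // file, then rename it into place.
    func atomicWrite(path string, data []byte) error {
        tmp := path + ".tmp"
        if err := os.WriteFile(tmp, data, 0o600); err != nil {
            return err
        }
        return os.Rename(tmp, path)
    }

    func save(dir string, data []byte) error {
        // Record the write intent first. If the process dies between the two
        // steps, readers merely reload an unchanged file (harmless); with the
        // opposite order they could trust a stale generation against new
        // content (unsafe).
        if err := bumpLockGeneration(dir); err != nil {
            return err
        }
        return atomicWrite(filepath.Join(dir, "layers.json"), data)
    }

    func main() {
        dir, err := os.MkdirTemp("", "ordering-demo")
        if err != nil {
            panic(err)
        }
        defer os.RemoveAll(dir)
        fmt.Println(save(dir, []byte("[]")))
    }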
+ lw, err := r.lockfile.RecordWrite() + if err != nil { + return err + } + r.lastWrite = lw + for locationIndex := 0; locationIndex < numLayerLocationIndex; locationIndex++ { location := layerLocationFromIndex(locationIndex) if location&saveLocations == 0 { continue } rpath := r.jsonPath[locationIndex] - if err := os.MkdirAll(filepath.Dir(rpath), 0700); err != nil { + if err := os.MkdirAll(filepath.Dir(rpath), 0o700); err != nil { return err } subsetLayers := make([]*Layer, 0, len(r.layers)) @@ -944,16 +950,11 @@ func (r *layerStore) saveLayers(saveLocations layerLocations) error { if location == volatileLayerLocation { opts.NoSync = true } - if err := ioutils.AtomicWriteFileWithOpts(rpath, jldata, 0600, &opts); err != nil { + if err := ioutils.AtomicWriteFileWithOpts(rpath, jldata, 0o600, &opts); err != nil { return err } r.layerspathsModified[locationIndex] = opts.ModTime } - lw, err := r.lockfile.RecordWrite() - if err != nil { - return err - } - r.lastWrite = lw return nil } @@ -965,7 +966,7 @@ func (r *layerStore) saveMounts() error { } r.mountsLockfile.AssertLockedForWriting() mpath := r.mountspath() - if err := os.MkdirAll(filepath.Dir(mpath), 0700); err != nil { + if err := os.MkdirAll(filepath.Dir(mpath), 0o700); err != nil { return err } mounts := make([]layerMountPoint, 0, len(r.layers)) @@ -982,22 +983,26 @@ func (r *layerStore) saveMounts() error { if err != nil { return err } - if err = ioutils.AtomicWriteFile(mpath, jmdata, 0600); err != nil { - return err - } + + // This must be done before we write the file, because the process could be terminated + // after the file is written but before the lock file is updated. lw, err := r.mountsLockfile.RecordWrite() if err != nil { return err } r.mountsLastWrite = lw + + if err = ioutils.AtomicWriteFile(mpath, jmdata, 0o600); err != nil { + return err + } return r.loadMounts() } func (s *store) newLayerStore(rundir string, layerdir string, driver drivers.Driver, transient bool) (rwLayerStore, error) { - if err := os.MkdirAll(rundir, 0700); err != nil { + if err := os.MkdirAll(rundir, 0o700); err != nil { return nil, err } - if err := os.MkdirAll(layerdir, 0700); err != nil { + if err := os.MkdirAll(layerdir, 0o700); err != nil { return nil, err } // Note: While the containers.lock file is in rundir for transient stores @@ -1213,10 +1218,10 @@ func (r *layerStore) create(id string, parentLayer *Layer, names []string, mount if !r.lockfile.IsReadWrite() { return nil, -1, fmt.Errorf("not allowed to create new layers at %q: %w", r.layerdir, ErrStoreIsReadOnly) } - if err := os.MkdirAll(r.rundir, 0700); err != nil { + if err := os.MkdirAll(r.rundir, 0o700); err != nil { return nil, -1, err } - if err := os.MkdirAll(r.layerdir, 0700); err != nil { + if err := os.MkdirAll(r.layerdir, 0o700); err != nil { return nil, -1, err } if id == "" { @@ -1690,7 +1695,7 @@ func (r *layerStore) setBigData(layer *Layer, key string, data io.Reader) error if key == "" { return fmt.Errorf("can't set empty name for layer big data item: %w", ErrInvalidBigDataName) } - err := os.MkdirAll(r.datadir(layer.ID), 0700) + err := os.MkdirAll(r.datadir(layer.ID), 0o700) if err != nil { return err } @@ -1698,7 +1703,7 @@ func (r *layerStore) setBigData(layer *Layer, key string, data io.Reader) error // NewAtomicFileWriter doesn't overwrite/truncate the existing inode. // BigData() relies on this behaviour when opening the file for read // so that it is either accessing the old data or the new one. 
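The inode property relied on above deserves a concrete illustration. A minimal sketch using plain os calls in place of ioutils.NewAtomicFileWriter, assuming Unix rename semantics: a reader that opened the old inode keeps reading the old, complete data even after the path has been replaced.

    package main

    import (
        "fmt"
        "io"
        "os"
    )

    func main() {
        if err := os.WriteFile("data", []byte("old"), 0o600); err != nil {
            panic(err)
        }
        r, err := os.Open("data") // reader now holds the old inode
        if err != nil {
            panic(err)
        }
        defer r.Close()

        // Writer side: a fresh inode, renamed over the path. The old inode
        // is never truncated, so the reader sees either all-old or all-new
        // bytes, never a torn mix.
        if err := os.WriteFile("data.tmp", []byte("new"), 0o600); err != nil {
            panic(err)
        }
        if err := os.Rename("data.tmp", "data"); err != nil {
            panic(err)
        }

        got, err := io.ReadAll(r)
        if err != nil {
            panic(err)
        }
        fmt.Println(string(got)) // prints "old"
        os.Remove("data")
    }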
- writer, err := ioutils.NewAtomicFileWriter(r.datapath(layer.ID, key), 0600) + writer, err := ioutils.NewAtomicFileWriter(r.datapath(layer.ID, key), 0o600) if err != nil { return fmt.Errorf("opening bigdata file: %w", err) } @@ -1922,6 +1927,18 @@ func (r *layerStore) Wipe() error { return err } } + ids, err := r.driver.ListLayers() + if err != nil { + if !errors.Is(err, drivers.ErrNotSupported) { + return err + } + ids = nil + } + for _, id := range ids { + if err := r.driver.Remove(id); err != nil { + return err + } + } return nil } @@ -2198,7 +2215,7 @@ func (r *layerStore) applyDiffWithOptions(to string, layerOptions *LayerOptions, return -1, err } compression := archive.DetectCompression(header[:n]) - defragmented := io.MultiReader(bytes.NewBuffer(header[:n]), diff) + defragmented := io.MultiReader(bytes.NewReader(header[:n]), diff) // Decide if we need to compute digests var compressedDigest, uncompressedDigest digest.Digest // = "" @@ -2226,54 +2243,63 @@ func (r *layerStore) applyDiffWithOptions(to string, layerOptions *LayerOptions, defragmented = io.TeeReader(defragmented, compressedCounter) tsdata := bytes.Buffer{} - compressor, err := pgzip.NewWriterLevel(&tsdata, pgzip.BestSpeed) - if err != nil { - compressor = pgzip.NewWriter(&tsdata) - } - if err := compressor.SetConcurrency(1024*1024, 1); err != nil { // 1024*1024 is the hard-coded default; we're not changing that - logrus.Infof("setting compression concurrency threads to 1: %v; ignoring", err) - } - metadata := storage.NewJSONPacker(compressor) - uncompressed, err := archive.DecompressStream(defragmented) - if err != nil { - return -1, err - } - defer uncompressed.Close() uidLog := make(map[uint32]struct{}) gidLog := make(map[uint32]struct{}) - idLogger, err := tarlog.NewLogger(func(h *tar.Header) { - if !strings.HasPrefix(path.Base(h.Name), archive.WhiteoutPrefix) { - uidLog[uint32(h.Uid)] = struct{}{} - gidLog[uint32(h.Gid)] = struct{}{} + var uncompressedCounter *ioutils.WriteCounter + + size, err = func() (int64, error) { // A scope for defer + compressor, err := pgzip.NewWriterLevel(&tsdata, pgzip.BestSpeed) + if err != nil { + return -1, err } - }) - if err != nil { - return -1, err - } - defer idLogger.Close() - uncompressedCounter := ioutils.NewWriteCounter(idLogger) - uncompressedWriter := (io.Writer)(uncompressedCounter) - if uncompressedDigester != nil { - uncompressedWriter = io.MultiWriter(uncompressedWriter, uncompressedDigester.Hash()) - } - payload, err := asm.NewInputTarStream(io.TeeReader(uncompressed, uncompressedWriter), metadata, storage.NewDiscardFilePutter()) - if err != nil { - return -1, err - } - options := drivers.ApplyDiffOpts{ - Diff: payload, - Mappings: r.layerMappings(layer), - MountLabel: layer.MountLabel, - } - size, err = r.driver.ApplyDiff(layer.ID, layer.Parent, options) + defer compressor.Close() // This must happen before tsdata is consumed. 
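The function literal introduced above is the classic "scope for defer" pattern. A minimal sketch with toy names, using compress/gzip in place of pgzip: wrapping the work in its own function forces the deferred Close to run, and therefore flush, before the caller consumes the buffer.

    package main

    import (
        "bytes"
        "compress/gzip"
        "fmt"
    )

    // fill compresses payload into buf. Its defer runs when fill returns,
    // not at the end of the caller, so the stream is fully flushed by then.
    func fill(buf *bytes.Buffer, payload []byte) error {
        zw := gzip.NewWriter(buf)
        defer zw.Close() // writes the final gzip frame before fill returns
        _, err := zw.Write(payload)
        return err
    }

    func main() {
        var buf bytes.Buffer
        if err := fill(&buf, []byte("payload")); err != nil {
            panic(err)
        }
        // Safe: every deferred Close already ran, so buf is complete here.
        fmt.Println(buf.Len(), "compressed bytes")
    }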
+ if err := compressor.SetConcurrency(1024*1024, 1); err != nil { // 1024*1024 is the hard-coded default; we're not changing that + logrus.Infof("setting compression concurrency threads to 1: %v; ignoring", err) + } + metadata := storage.NewJSONPacker(compressor) + uncompressed, err := archive.DecompressStream(defragmented) + if err != nil { + return -1, err + } + defer uncompressed.Close() + idLogger, err := tarlog.NewLogger(func(h *tar.Header) { + if !strings.HasPrefix(path.Base(h.Name), archive.WhiteoutPrefix) { + uidLog[uint32(h.Uid)] = struct{}{} + gidLog[uint32(h.Gid)] = struct{}{} + } + }) + if err != nil { + return -1, err + } + defer idLogger.Close() // This must happen before uidLog and gidLog are consumed. + uncompressedCounter = ioutils.NewWriteCounter(idLogger) + uncompressedWriter := (io.Writer)(uncompressedCounter) + if uncompressedDigester != nil { + uncompressedWriter = io.MultiWriter(uncompressedWriter, uncompressedDigester.Hash()) + } + payload, err := asm.NewInputTarStream(io.TeeReader(uncompressed, uncompressedWriter), metadata, storage.NewDiscardFilePutter()) + if err != nil { + return -1, err + } + options := drivers.ApplyDiffOpts{ + Diff: payload, + Mappings: r.layerMappings(layer), + MountLabel: layer.MountLabel, + } + size, err := r.driver.ApplyDiff(layer.ID, layer.Parent, options) + if err != nil { + return -1, err + } + return size, err + }() if err != nil { return -1, err } - compressor.Close() - if err := os.MkdirAll(filepath.Dir(r.tspath(layer.ID)), 0700); err != nil { + + if err := os.MkdirAll(filepath.Dir(r.tspath(layer.ID)), 0o700); err != nil { return -1, err } - if err := ioutils.AtomicWriteFile(r.tspath(layer.ID), tsdata.Bytes(), 0600); err != nil { + if err := ioutils.AtomicWriteFile(r.tspath(layer.ID), tsdata.Bytes(), 0o600); err != nil { return -1, err } if compressedDigester != nil { @@ -2366,8 +2392,26 @@ func (r *layerStore) ApplyDiffFromStagingDirectory(id, stagingDirectory string, layer.UncompressedDigest = diffOutput.UncompressedDigest layer.UncompressedSize = diffOutput.Size layer.Metadata = diffOutput.Metadata - if err = r.saveFor(layer); err != nil { - return err + if len(diffOutput.TarSplit) != 0 { + tsdata := bytes.Buffer{} + compressor, err := pgzip.NewWriterLevel(&tsdata, pgzip.BestSpeed) + if err != nil { + compressor = pgzip.NewWriter(&tsdata) + } + if err := compressor.SetConcurrency(1024*1024, 1); err != nil { // 1024*1024 is the hard-coded default; we're not changing that + logrus.Infof("setting compression concurrency threads to 1: %v; ignoring", err) + } + if _, err := compressor.Write(diffOutput.TarSplit); err != nil { + compressor.Close() + return err + } + compressor.Close() + if err := os.MkdirAll(filepath.Dir(r.tspath(layer.ID)), 0o700); err != nil { + return err + } + if err := ioutils.AtomicWriteFile(r.tspath(layer.ID), tsdata.Bytes(), 0o600); err != nil { + return err + } } for k, v := range diffOutput.BigData { if err := r.SetBigData(id, k, bytes.NewReader(v)); err != nil { @@ -2377,6 +2421,9 @@ func (r *layerStore) ApplyDiffFromStagingDirectory(id, stagingDirectory string, return err } } + if err = r.saveFor(layer); err != nil { + return err + } return err } @@ -2443,10 +2490,6 @@ func (r *layerStore) LayersByUncompressedDigest(d digest.Digest) ([]Layer, error return r.layersByDigestMap(r.byuncompressedsum, d) } -func (r *layerStore) supportsShifting() bool { - return r.driver.SupportsShifting() -} - func closeAll(closes ...func() error) (rErr error) { for _, f := range closes { if err := f(); err != nil { diff --git
a/vendor/github.com/containers/storage/pkg/archive/archive.go b/vendor/github.com/containers/storage/pkg/archive/archive.go index 6209205b3..408e4599c 100644 --- a/vendor/github.com/containers/storage/pkg/archive/archive.go +++ b/vendor/github.com/containers/storage/pkg/archive/archive.go @@ -132,13 +132,13 @@ const ( ) const ( - modeISDIR = 040000 // Directory - modeISFIFO = 010000 // FIFO - modeISREG = 0100000 // Regular file - modeISLNK = 0120000 // Symbolic link - modeISBLK = 060000 // Block special file - modeISCHR = 020000 // Character special file - modeISSOCK = 0140000 // Socket + modeISDIR = 0o40000 // Directory + modeISFIFO = 0o10000 // FIFO + modeISREG = 0o100000 // Regular file + modeISLNK = 0o120000 // Symbolic link + modeISBLK = 0o60000 // Block special file + modeISCHR = 0o20000 // Character special file + modeISSOCK = 0o140000 // Socket ) // IsArchivePath checks if the (possibly compressed) file at the given path @@ -328,7 +328,6 @@ func ReplaceFileTarWrapper(inputTarStream io.ReadCloser, mods map[string]TarModi } pipeWriter.Close() - }() return pipeReader } @@ -552,9 +551,9 @@ func (ta *tarAppender) addTarFile(path, name string) error { } } - //handle re-mapping container ID mappings back to host ID mappings before - //writing tar headers/files. We skip whiteout files because they were written - //by the kernel and already have proper ownership relative to the host + // handle re-mapping container ID mappings back to host ID mappings before + // writing tar headers/files. We skip whiteout files because they were written + // by the kernel and already have proper ownership relative to the host if !strings.HasPrefix(filepath.Base(hdr.Name), WhiteoutPrefix) && !ta.IDMappings.Empty() { fileIDPair, err := getFileUIDGID(fi.Sys()) if err != nil { @@ -702,7 +701,7 @@ func createTarFile(path, extractDir string, hdr *tar.Header, reader io.Reader, L } if forceMask != nil && (hdr.Typeflag != tar.TypeSymlink || runtime.GOOS == "darwin") { - value := fmt.Sprintf("%d:%d:0%o", hdr.Uid, hdr.Gid, hdrInfo.Mode()&07777) + value := fmt.Sprintf("%d:%d:0%o", hdr.Uid, hdr.Gid, hdrInfo.Mode()&0o7777) if err := system.Lsetxattr(path, idtools.ContainersOverrideXattr, []byte(value), 0); err != nil { return err } @@ -800,7 +799,6 @@ func Tar(path string, compression Compression) (io.ReadCloser, error) { // TarWithOptions creates an archive from the directory at `path`, only including files whose relative // paths are included in `options.IncludeFiles` (if non-nil) or not in `options.ExcludePatterns`. func TarWithOptions(srcPath string, options *TarOptions) (io.ReadCloser, error) { - // Fix the source path to work with long path names. This is a no-op // on platforms other than Windows. 
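For readers unfamiliar with the Windows quirk behind this comment, here is a hedged sketch of what such a long-path fixup typically does; addLongPathPrefix is illustrative, not the actual fixVolumePathPrefix helper: absolute paths gain the \\?\ prefix so the Windows API bypasses the legacy 260-character path limit.

    package main

    import (
        "fmt"
        "runtime"
        "strings"
    )

    // addLongPathPrefix is a hypothetical stand-in for a Windows long-path
    // fixup. On other platforms it is a no-op, mirroring the comment above.
    func addLongPathPrefix(path string) string {
        if runtime.GOOS != "windows" || strings.HasPrefix(path, `\\?\`) {
            return path
        }
        if strings.HasPrefix(path, `\\`) {
            // UNC share: \\server\share -> \\?\UNC\server\share
            return `\\?\UNC` + path[1:]
        }
        return `\\?\` + path
    }

    func main() {
        fmt.Println(addLongPathPrefix(`C:\very\long\path`))
    }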
srcPath = fixVolumePathPrefix(srcPath) @@ -1032,7 +1030,7 @@ loop: parent := filepath.Dir(hdr.Name) parentPath := filepath.Join(dest, parent) if _, err := os.Lstat(parentPath); err != nil && os.IsNotExist(err) { - err = idtools.MkdirAllAndChownNew(parentPath, 0777, rootIDs) + err = idtools.MkdirAllAndChownNew(parentPath, 0o777, rootIDs) if err != nil { return err } @@ -1239,7 +1237,7 @@ func (archiver *Archiver) CopyWithTar(src, dst string) error { } // Create dst, copy src's content into it logrus.Debugf("Creating dest directory: %s", dst) - if err := idtools.MkdirAllAndChownNew(dst, 0755, rootIDs); err != nil { + if err := idtools.MkdirAllAndChownNew(dst, 0o755, rootIDs); err != nil { return err } logrus.Debugf("Calling TarUntar(%s, %s)", src, dst) @@ -1266,7 +1264,7 @@ func (archiver *Archiver) CopyFileWithTar(src, dst string) (err error) { dst = filepath.Join(dst, filepath.Base(src)) } // Create the holding directory if necessary - if err := os.MkdirAll(filepath.Dir(dst), 0700); err != nil { + if err := os.MkdirAll(filepath.Dir(dst), 0o700); err != nil { return err } @@ -1422,7 +1420,7 @@ func IsArchive(header []byte) bool { if compression != Uncompressed { return true } - r := tar.NewReader(bytes.NewBuffer(header)) + r := tar.NewReader(bytes.NewReader(header)) _, err := r.Next() return err == nil } diff --git a/vendor/github.com/containers/storage/pkg/archive/archive_linux.go b/vendor/github.com/containers/storage/pkg/archive/archive_linux.go index 775bd0766..02995d767 100644 --- a/vendor/github.com/containers/storage/pkg/archive/archive_linux.go +++ b/vendor/github.com/containers/storage/pkg/archive/archive_linux.go @@ -153,8 +153,7 @@ func (overlayWhiteoutConverter) ConvertReadWithHandler(hdr *tar.Header, path str return true, nil } -type directHandler struct { -} +type directHandler struct{} func (d directHandler) Setxattr(path, name string, value []byte) error { return unix.Setxattr(path, name, value, 0) @@ -185,7 +184,7 @@ func GetFileOwner(path string) (uint32, uint32, uint32, error) { } s, ok := f.Sys().(*syscall.Stat_t) if ok { - return s.Uid, s.Gid, s.Mode & 07777, nil + return s.Uid, s.Gid, s.Mode & 0o7777, nil } return 0, 0, uint32(f.Mode()), nil } diff --git a/vendor/github.com/containers/storage/pkg/archive/archive_unix.go b/vendor/github.com/containers/storage/pkg/archive/archive_unix.go index f8a34c831..88192f220 100644 --- a/vendor/github.com/containers/storage/pkg/archive/archive_unix.go +++ b/vendor/github.com/containers/storage/pkg/archive/archive_unix.go @@ -88,7 +88,7 @@ func minor(device uint64) uint64 { // handleTarTypeBlockCharFifo is an OS-specific helper function used by // createTarFile to handle the following types of header: Block; Char; Fifo func handleTarTypeBlockCharFifo(hdr *tar.Header, path string) error { - mode := uint32(hdr.Mode & 07777) + mode := uint32(hdr.Mode & 0o7777) switch hdr.Typeflag { case tar.TypeBlock: mode |= unix.S_IFBLK diff --git a/vendor/github.com/containers/storage/pkg/archive/archive_windows.go b/vendor/github.com/containers/storage/pkg/archive/archive_windows.go index e44011775..85a5b3a5d 100644 --- a/vendor/github.com/containers/storage/pkg/archive/archive_windows.go +++ b/vendor/github.com/containers/storage/pkg/archive/archive_windows.go @@ -38,18 +38,17 @@ func CanonicalTarNameForPath(p string) (string, error) { return "", fmt.Errorf("windows path contains forward slash: %s", p) } return strings.Replace(p, string(os.PathSeparator), "/", -1), nil - } // chmodTarEntry is used to adjust the file permissions used in tar header 
based // on the platform the archival is done. func chmodTarEntry(perm os.FileMode) os.FileMode { - //perm &= 0755 // this 0-ed out tar flags (like link, regular file, directory marker etc.) + // perm &= 0755 // this 0-ed out tar flags (like link, regular file, directory marker etc.) permPart := perm & os.ModePerm noPermPart := perm &^ os.ModePerm // Add the x bit: make everything +x from windows - permPart |= 0111 - permPart &= 0755 + permPart |= 0o111 + permPart &= 0o755 return noPermPart | permPart } diff --git a/vendor/github.com/containers/storage/pkg/archive/changes.go b/vendor/github.com/containers/storage/pkg/archive/changes.go index fc705484e..01c6f30c2 100644 --- a/vendor/github.com/containers/storage/pkg/archive/changes.go +++ b/vendor/github.com/containers/storage/pkg/archive/changes.go @@ -131,9 +131,11 @@ func isENOTDIR(err error) bool { return false } -type skipChange func(string) (bool, error) -type deleteChange func(string, string, os.FileInfo) (string, error) -type whiteoutChange func(string, string) (bool, error) +type ( + skipChange func(string) (bool, error) + deleteChange func(string, string, os.FileInfo) (string, error) + whiteoutChange func(string, string) (bool, error) +) func changes(layers []string, rw string, dc deleteChange, sc skipChange, wc whiteoutChange) ([]Change, error) { var ( @@ -299,7 +301,6 @@ func (info *FileInfo) path() string { } func (info *FileInfo) addChanges(oldInfo *FileInfo, changes *[]Change) { - sizeAtEntry := len(*changes) if oldInfo == nil { @@ -373,7 +374,6 @@ func (info *FileInfo) addChanges(oldInfo *FileInfo, changes *[]Change) { copy((*changes)[sizeAtEntry+1:], (*changes)[sizeAtEntry:]) (*changes)[sizeAtEntry] = change } - } // Changes add changes to file information. @@ -398,9 +398,7 @@ func newRootFileInfo(idMappings *idtools.IDMappings) *FileInfo { // ChangesDirs compares two directories and generates an array of Change objects describing the changes. // If oldDir is "", then all files in newDir will be Add-Changes. func ChangesDirs(newDir string, newMappings *idtools.IDMappings, oldDir string, oldMappings *idtools.IDMappings) ([]Change, error) { - var ( - oldRoot, newRoot *FileInfo - ) + var oldRoot, newRoot *FileInfo if oldDir == "" { emptyDir, err := os.MkdirTemp("", "empty") if err != nil { diff --git a/vendor/github.com/containers/storage/pkg/archive/changes_linux.go b/vendor/github.com/containers/storage/pkg/archive/changes_linux.go index 77d3d6f51..f8414717b 100644 --- a/vendor/github.com/containers/storage/pkg/archive/changes_linux.go +++ b/vendor/github.com/containers/storage/pkg/archive/changes_linux.go @@ -397,5 +397,4 @@ func overlayDeletedFile(layers []string, root, path string, fi os.FileInfo) (str // We didn't find the same path in any older layers, so it was new in this one. 
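A toy sketch of the lower-layer walk this comment concludes; deletedFile below is a deliberately simplified, hypothetical version of the real overlayDeletedFile logic: a whiteout only reports a deletion if some lower layer actually contained the path.

    package main

    import (
        "fmt"
        "os"
        "path/filepath"
    )

    // deletedFile walks lower layers, newest first. If the path existed in
    // some lower layer, the whiteout deletes real content; if it never
    // existed below, it was created and removed within this layer.
    func deletedFile(lowerLayers []string, path string) (string, error) {
        for _, layer := range lowerLayers {
            _, err := os.Lstat(filepath.Join(layer, path))
            if err == nil {
                return path, nil // found below: report a deletion
            }
            if !os.IsNotExist(err) {
                return "", err
            }
        }
        return "", nil // new in this layer: nothing to report
    }

    func main() {
        p, err := deletedFile([]string{"/nonexistent-lower"}, "etc/passwd")
        fmt.Printf("deleted=%q err=%v\n", p, err)
    }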
return "", nil - } diff --git a/vendor/github.com/containers/storage/pkg/archive/changes_windows.go b/vendor/github.com/containers/storage/pkg/archive/changes_windows.go index 966400e59..1bab94fa5 100644 --- a/vendor/github.com/containers/storage/pkg/archive/changes_windows.go +++ b/vendor/github.com/containers/storage/pkg/archive/changes_windows.go @@ -7,7 +7,6 @@ import ( ) func statDifferent(oldStat *system.StatT, oldInfo *FileInfo, newStat *system.StatT, newInfo *FileInfo) bool { - // Don't look at size for dirs, its not a good measure of change if oldStat.Mtim() != newStat.Mtim() || oldStat.Mode() != newStat.Mode() || diff --git a/vendor/github.com/containers/storage/pkg/archive/copy.go b/vendor/github.com/containers/storage/pkg/archive/copy.go index 2c714e8da..55f753bf4 100644 --- a/vendor/github.com/containers/storage/pkg/archive/copy.go +++ b/vendor/github.com/containers/storage/pkg/archive/copy.go @@ -297,7 +297,6 @@ func PrepareArchiveCopy(srcContent io.Reader, srcInfo, dstInfo CopyInfo) (dstDir } return dstDir, RebaseArchiveEntries(srcContent, srcBase, dstBase), nil } - } // RebaseArchiveEntries rewrites the given srcContent archive replacing diff --git a/vendor/github.com/containers/storage/pkg/archive/diff.go b/vendor/github.com/containers/storage/pkg/archive/diff.go index 8fec5af38..713551859 100644 --- a/vendor/github.com/containers/storage/pkg/archive/diff.go +++ b/vendor/github.com/containers/storage/pkg/archive/diff.go @@ -85,7 +85,7 @@ func UnpackLayer(dest string, layer io.Reader, options *TarOptions) (size int64, parentPath := filepath.Join(dest, parent) if _, err := os.Lstat(parentPath); err != nil && os.IsNotExist(err) { - err = os.MkdirAll(parentPath, 0755) + err = os.MkdirAll(parentPath, 0o755) if err != nil { return 0, err } diff --git a/vendor/github.com/containers/storage/pkg/archive/fflags_bsd.go b/vendor/github.com/containers/storage/pkg/archive/fflags_bsd.go index 14661c411..92b8d05ed 100644 --- a/vendor/github.com/containers/storage/pkg/archive/fflags_bsd.go +++ b/vendor/github.com/containers/storage/pkg/archive/fflags_bsd.go @@ -98,7 +98,7 @@ func parseFileFlags(fflags string) (uint32, uint32, error) { } func formatFileFlags(fflags uint32) (string, error) { - var res = []string{} + res := []string{} for fflags != 0 { // Extract lowest set bit fflag := uint32(1) << bits.TrailingZeros32(fflags) diff --git a/vendor/github.com/containers/storage/pkg/chrootarchive/archive.go b/vendor/github.com/containers/storage/pkg/chrootarchive/archive.go index 2de95f39a..f221a2283 100644 --- a/vendor/github.com/containers/storage/pkg/chrootarchive/archive.go +++ b/vendor/github.com/containers/storage/pkg/chrootarchive/archive.go @@ -77,7 +77,7 @@ func untarHandler(tarArchive io.Reader, dest string, options *archive.TarOptions dest = filepath.Clean(dest) if _, err := os.Stat(dest); os.IsNotExist(err) { - if err := idtools.MkdirAllAndChownNew(dest, 0755, rootIDs); err != nil { + if err := idtools.MkdirAllAndChownNew(dest, 0o755, rootIDs); err != nil { return err } } diff --git a/vendor/github.com/containers/storage/pkg/chrootarchive/archive_darwin.go b/vendor/github.com/containers/storage/pkg/chrootarchive/archive_darwin.go index 42ee39f48..f7a16e9f9 100644 --- a/vendor/github.com/containers/storage/pkg/chrootarchive/archive_darwin.go +++ b/vendor/github.com/containers/storage/pkg/chrootarchive/archive_darwin.go @@ -8,7 +8,8 @@ import ( func invokeUnpack(decompressedArchive io.Reader, dest string, - options *archive.TarOptions, root string) error { + options 
*archive.TarOptions, root string, +) error { return archive.Unpack(decompressedArchive, dest, options) } diff --git a/vendor/github.com/containers/storage/pkg/chrootarchive/archive_unix.go b/vendor/github.com/containers/storage/pkg/chrootarchive/archive_unix.go index 8cc0f33b3..259f8c99a 100644 --- a/vendor/github.com/containers/storage/pkg/chrootarchive/archive_unix.go +++ b/vendor/github.com/containers/storage/pkg/chrootarchive/archive_unix.go @@ -27,7 +27,7 @@ func untar() { var options archive.TarOptions - //read the options from the pipe "ExtraFiles" + // read the options from the pipe "ExtraFiles" if err := json.NewDecoder(os.NewFile(3, "options")).Decode(&options); err != nil { fatal(err) } @@ -99,7 +99,7 @@ func invokeUnpack(decompressedArchive io.Reader, dest string, options *archive.T return fmt.Errorf("untar error on re-exec cmd: %w", err) } - //write the options to the pipe for the untar exec to read + // write the options to the pipe for the untar exec to read if err := json.NewEncoder(w).Encode(options); err != nil { w.Close() return fmt.Errorf("untar json encode to pipe failed: %w", err) diff --git a/vendor/github.com/containers/storage/pkg/chrootarchive/archive_windows.go b/vendor/github.com/containers/storage/pkg/chrootarchive/archive_windows.go index 1395ff8cd..745502204 100644 --- a/vendor/github.com/containers/storage/pkg/chrootarchive/archive_windows.go +++ b/vendor/github.com/containers/storage/pkg/chrootarchive/archive_windows.go @@ -14,7 +14,8 @@ func chroot(path string) error { func invokeUnpack(decompressedArchive io.Reader, dest string, - options *archive.TarOptions, root string) error { + options *archive.TarOptions, root string, +) error { // Windows is different to Linux here because Windows does not support // chroot. Hence there is no point sandboxing a chrooted process to // do the unpack. We call inline instead within the daemon process. diff --git a/vendor/github.com/containers/storage/pkg/chrootarchive/diff_unix.go b/vendor/github.com/containers/storage/pkg/chrootarchive/diff_unix.go index 90f453913..71ed094d1 100644 --- a/vendor/github.com/containers/storage/pkg/chrootarchive/diff_unix.go +++ b/vendor/github.com/containers/storage/pkg/chrootarchive/diff_unix.go @@ -26,7 +26,6 @@ type applyLayerResponse struct { // used on Windows as it does not support chroot, hence no point sandboxing // through chroot and rexec. 
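The fd-3 handshake used by untar above is compact enough to miss. A minimal, runnable sketch of the re-exec pattern, assuming a Linux host for /proc/self/exe; the helper names are illustrative:

    package main

    import (
        "encoding/json"
        "fmt"
        "os"
        "os/exec"
    )

    type options struct{ Dest string }

    // runParent re-executes this binary and feeds it options over an extra
    // inherited pipe: the first ExtraFiles entry becomes fd 3 in the child.
    func runParent() error {
        r, w, err := os.Pipe()
        if err != nil {
            return err
        }
        defer r.Close()
        cmd := exec.Command("/proc/self/exe", "child")
        cmd.ExtraFiles = []*os.File{r}
        cmd.Stdout = os.Stdout
        if err := cmd.Start(); err != nil {
            return err
        }
        if err := json.NewEncoder(w).Encode(options{Dest: "/tmp/x"}); err != nil {
            w.Close()
            return err
        }
        w.Close()
        return cmd.Wait()
    }

    func runChild() {
        var opts options
        // fd 3 is the read end of the parent's pipe.
        if err := json.NewDecoder(os.NewFile(3, "options")).Decode(&opts); err != nil {
            fmt.Fprintln(os.Stderr, err)
            os.Exit(1)
        }
        fmt.Println("child got dest:", opts.Dest)
    }

    func main() {
        if len(os.Args) > 1 && os.Args[1] == "child" {
            runChild()
            return
        }
        if err := runParent(); err != nil {
            fmt.Fprintln(os.Stderr, err)
            os.Exit(1)
        }
    }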
func applyLayer() { - var ( tmpDir string err error diff --git a/vendor/github.com/containers/storage/pkg/chunked/cache_linux.go b/vendor/github.com/containers/storage/pkg/chunked/cache_linux.go index 7efd12373..cd13212e6 100644 --- a/vendor/github.com/containers/storage/pkg/chunked/cache_linux.go +++ b/vendor/github.com/containers/storage/pkg/chunked/cache_linux.go @@ -48,8 +48,10 @@ type layersCache struct { created time.Time } -var cacheMutex sync.Mutex -var cache *layersCache +var ( + cacheMutex sync.Mutex + cache *layersCache +) func (c *layersCache) release() { cacheMutex.Lock() @@ -514,14 +516,14 @@ func unmarshalToc(manifest []byte) (*internal.TOC, error) { iter := jsoniter.ParseBytes(jsoniter.ConfigFastest, manifest) for field := iter.ReadObject(); field != ""; field = iter.ReadObject() { - if field != "entries" { + if strings.ToLower(field) != "entries" { iter.Skip() continue } for iter.ReadArray() { for field := iter.ReadObject(); field != ""; field = iter.ReadObject() { - switch field { - case "type", "name", "linkName", "digest", "chunkDigest", "chunkType": + switch strings.ToLower(field) { + case "type", "name", "linkname", "digest", "chunkdigest", "chunktype", "modtime", "accesstime", "changetime": count += len(iter.ReadStringAsSlice()) case "xattrs": for key := iter.ReadObject(); key != ""; key = iter.ReadObject() { @@ -546,33 +548,33 @@ func unmarshalToc(manifest []byte) (*internal.TOC, error) { iter = jsoniter.ParseBytes(jsoniter.ConfigFastest, manifest) for field := iter.ReadObject(); field != ""; field = iter.ReadObject() { - if field == "version" { + if strings.ToLower(field) == "version" { toc.Version = iter.ReadInt() continue } - if field != "entries" { + if strings.ToLower(field) != "entries" { iter.Skip() continue } for iter.ReadArray() { var m internal.FileMetadata for field := iter.ReadObject(); field != ""; field = iter.ReadObject() { - switch field { + switch strings.ToLower(field) { case "type": m.Type = getString(iter.ReadStringAsSlice()) case "name": m.Name = getString(iter.ReadStringAsSlice()) - case "linkName": + case "linkname": m.Linkname = getString(iter.ReadStringAsSlice()) case "mode": m.Mode = iter.ReadInt64() case "size": m.Size = iter.ReadInt64() - case "UID": + case "uid": m.UID = iter.ReadInt() - case "GID": + case "gid": m.GID = iter.ReadInt() - case "ModTime": + case "modtime": time, err := time.Parse(time.RFC3339, byteSliceAsString(iter.ReadStringAsSlice())) if err != nil { return nil, err @@ -590,23 +592,23 @@ func unmarshalToc(manifest []byte) (*internal.TOC, error) { return nil, err } m.ChangeTime = &time - case "devMajor": + case "devmajor": m.Devmajor = iter.ReadInt64() - case "devMinor": + case "devminor": m.Devminor = iter.ReadInt64() case "digest": m.Digest = getString(iter.ReadStringAsSlice()) case "offset": m.Offset = iter.ReadInt64() - case "endOffset": + case "endoffset": m.EndOffset = iter.ReadInt64() - case "chunkSize": + case "chunksize": m.ChunkSize = iter.ReadInt64() - case "chunkOffset": + case "chunkoffset": m.ChunkOffset = iter.ReadInt64() - case "chunkDigest": + case "chunkdigest": m.ChunkDigest = getString(iter.ReadStringAsSlice()) - case "chunkType": + case "chunktype": m.ChunkType = getString(iter.ReadStringAsSlice()) case "xattrs": m.Xattrs = make(map[string]string) diff --git a/vendor/github.com/containers/storage/pkg/chunked/compression_linux.go b/vendor/github.com/containers/storage/pkg/chunked/compression_linux.go index 9333ed65c..2ee79dd23 100644 --- 
a/vendor/github.com/containers/storage/pkg/chunked/compression_linux.go +++ b/vendor/github.com/containers/storage/pkg/chunked/compression_linux.go @@ -150,22 +150,32 @@ func readEstargzChunkedManifest(blobStream ImageSourceSeekable, blobSize int64, // readZstdChunkedManifest reads the zstd:chunked manifest from the seekable stream blobStream. The blob total size must // be specified. // This function uses the io.github.containers.zstd-chunked. annotations when specified. -func readZstdChunkedManifest(ctx context.Context, blobStream ImageSourceSeekable, blobSize int64, annotations map[string]string) ([]byte, int64, error) { +func readZstdChunkedManifest(ctx context.Context, blobStream ImageSourceSeekable, blobSize int64, annotations map[string]string) ([]byte, []byte, int64, error) { footerSize := int64(internal.FooterSizeSupported) if blobSize <= footerSize { - return nil, 0, errors.New("blob too small") + return nil, nil, 0, errors.New("blob too small") } manifestChecksumAnnotation := annotations[internal.ManifestChecksumKey] if manifestChecksumAnnotation == "" { - return nil, 0, fmt.Errorf("manifest checksum annotation %q not found", internal.ManifestChecksumKey) + return nil, nil, 0, fmt.Errorf("manifest checksum annotation %q not found", internal.ManifestChecksumKey) } var offset, length, lengthUncompressed, manifestType uint64 + var offsetTarSplit, lengthTarSplit, lengthUncompressedTarSplit uint64 + tarSplitChecksumAnnotation := "" + if offsetMetadata := annotations[internal.ManifestInfoKey]; offsetMetadata != "" { if _, err := fmt.Sscanf(offsetMetadata, "%d:%d:%d:%d", &offset, &length, &lengthUncompressed, &manifestType); err != nil { - return nil, 0, err + return nil, nil, 0, err + } + + if tarSplitInfoKeyAnnotation, found := annotations[internal.TarSplitInfoKey]; found { + if _, err := fmt.Sscanf(tarSplitInfoKeyAnnotation, "%d:%d:%d", &offsetTarSplit, &lengthTarSplit, &lengthUncompressedTarSplit); err != nil { + return nil, nil, 0, err + } + tarSplitChecksumAnnotation = annotations[internal.TarSplitChecksumKey] } } else { chunk := ImageSourceChunk{ @@ -174,39 +184,39 @@ func readZstdChunkedManifest(ctx context.Context, blobStream ImageSourceSeekable } parts, errs, err := blobStream.GetBlobAt([]ImageSourceChunk{chunk}) if err != nil { - return nil, 0, err + return nil, nil, 0, err } var reader io.ReadCloser select { case r := <-parts: reader = r case err := <-errs: - return nil, 0, err + return nil, nil, 0, err } footer := make([]byte, footerSize) if _, err := io.ReadFull(reader, footer); err != nil { - return nil, 0, err + return nil, nil, 0, err } offset = binary.LittleEndian.Uint64(footer[0:8]) length = binary.LittleEndian.Uint64(footer[8:16]) lengthUncompressed = binary.LittleEndian.Uint64(footer[16:24]) manifestType = binary.LittleEndian.Uint64(footer[24:32]) - if !isZstdChunkedFrameMagic(footer[32:40]) { - return nil, 0, errors.New("invalid magic number") + if !isZstdChunkedFrameMagic(footer[48:56]) { + return nil, nil, 0, errors.New("invalid magic number") } } if manifestType != internal.ManifestTypeCRFS { - return nil, 0, errors.New("invalid manifest type") + return nil, nil, 0, errors.New("invalid manifest type") } // set a reasonable limit if length > (1<<20)*50 { - return nil, 0, errors.New("manifest too big") + return nil, nil, 0, errors.New("manifest too big") } if lengthUncompressed > (1<<20)*50 { - return nil, 0, errors.New("manifest too big") + return nil, nil, 0, errors.New("manifest too big") } chunk := ImageSourceChunk{ @@ -214,47 +224,86 @@ func 
readZstdChunkedManifest(ctx context.Context, blobStream ImageSourceSeekable Length: length, } - parts, errs, err := blobStream.GetBlobAt([]ImageSourceChunk{chunk}) + chunks := []ImageSourceChunk{chunk} + + if offsetTarSplit > 0 { + chunkTarSplit := ImageSourceChunk{ + Offset: offsetTarSplit, + Length: lengthTarSplit, + } + chunks = append(chunks, chunkTarSplit) + } + + parts, errs, err := blobStream.GetBlobAt(chunks) if err != nil { - return nil, 0, err + return nil, nil, 0, err } - var reader io.ReadCloser - select { - case r := <-parts: - reader = r - case err := <-errs: - return nil, 0, err + + readBlob := func(len uint64) ([]byte, error) { + var reader io.ReadCloser + select { + case r := <-parts: + reader = r + case err := <-errs: + return nil, err + } + + blob := make([]byte, len) + if _, err := io.ReadFull(reader, blob); err != nil { + reader.Close() + return nil, err + } + if err := reader.Close(); err != nil { + return nil, err + } + return blob, nil } - manifest := make([]byte, length) - if _, err := io.ReadFull(reader, manifest); err != nil { - return nil, 0, err + manifest, err := readBlob(length) + if err != nil { + return nil, nil, 0, err } - manifestDigester := digest.Canonical.Digester() - manifestChecksum := manifestDigester.Hash() - if _, err := manifestChecksum.Write(manifest); err != nil { - return nil, 0, err + decodedBlob, err := decodeAndValidateBlob(manifest, lengthUncompressed, manifestChecksumAnnotation) + if err != nil { + return nil, nil, 0, err + } + decodedTarSplit := []byte{} + if offsetTarSplit > 0 { + tarSplit, err := readBlob(lengthTarSplit) + if err != nil { + return nil, nil, 0, err + } + + decodedTarSplit, err = decodeAndValidateBlob(tarSplit, lengthUncompressedTarSplit, tarSplitChecksumAnnotation) + if err != nil { + return nil, nil, 0, err + } } + return decodedBlob, decodedTarSplit, int64(offset), err +} - d, err := digest.Parse(manifestChecksumAnnotation) +func decodeAndValidateBlob(blob []byte, lengthUncompressed uint64, expectedUncompressedChecksum string) ([]byte, error) { + d, err := digest.Parse(expectedUncompressedChecksum) if err != nil { - return nil, 0, err + return nil, err } - if manifestDigester.Digest() != d { - return nil, 0, errors.New("invalid manifest checksum") + + blobDigester := d.Algorithm().Digester() + blobChecksum := blobDigester.Hash() + if _, err := blobChecksum.Write(blob); err != nil { + return nil, err + } + if blobDigester.Digest() != d { + return nil, fmt.Errorf("invalid blob checksum, expected checksum %s, got %s", d, blobDigester.Digest()) } decoder, err := zstd.NewReader(nil) //nolint:contextcheck if err != nil { - return nil, 0, err + return nil, err } defer decoder.Close() b := make([]byte, 0, lengthUncompressed) - if decoded, err := decoder.DecodeAll(manifest, b); err == nil { - return decoded, int64(offset), nil - } - - return manifest, int64(offset), nil + return decoder.DecodeAll(blob, b) } diff --git a/vendor/github.com/containers/storage/pkg/chunked/compressor/compressor.go b/vendor/github.com/containers/storage/pkg/chunked/compressor/compressor.go index 2a9bdc675..ca7ce30f7 100644 --- a/vendor/github.com/containers/storage/pkg/chunked/compressor/compressor.go +++ b/vendor/github.com/containers/storage/pkg/chunked/compressor/compressor.go @@ -6,17 +6,23 @@ package compressor import ( "bufio" + "bytes" "encoding/base64" "io" "github.com/containers/storage/pkg/chunked/internal" "github.com/containers/storage/pkg/ioutils" + "github.com/klauspost/compress/zstd" "github.com/opencontainers/go-digest" 
"github.com/vbatts/tar-split/archive/tar" + "github.com/vbatts/tar-split/tar/asm" + "github.com/vbatts/tar-split/tar/storage" ) -const RollsumBits = 16 -const holesThreshold = int64(1 << 10) +const ( + RollsumBits = 16 + holesThreshold = int64(1 << 10) +) type holesFinder struct { reader *bufio.Reader @@ -196,11 +202,55 @@ type chunk struct { ChunkType string } +type tarSplitData struct { + compressed *bytes.Buffer + digester digest.Digester + uncompressedCounter *ioutils.WriteCounter + zstd *zstd.Encoder + packer storage.Packer +} + +func newTarSplitData(level int) (*tarSplitData, error) { + compressed := bytes.NewBuffer(nil) + digester := digest.Canonical.Digester() + + zstdWriter, err := internal.ZstdWriterWithLevel(io.MultiWriter(compressed, digester.Hash()), level) + if err != nil { + return nil, err + } + + uncompressedCounter := ioutils.NewWriteCounter(zstdWriter) + metaPacker := storage.NewJSONPacker(uncompressedCounter) + + return &tarSplitData{ + compressed: compressed, + digester: digester, + uncompressedCounter: uncompressedCounter, + zstd: zstdWriter, + packer: metaPacker, + }, nil +} + func writeZstdChunkedStream(destFile io.Writer, outMetadata map[string]string, reader io.Reader, level int) error { // total written so far. Used to retrieve partial offsets in the file dest := ioutils.NewWriteCounter(destFile) - tr := tar.NewReader(reader) + tarSplitData, err := newTarSplitData(level) + if err != nil { + return err + } + defer func() { + if tarSplitData.zstd != nil { + tarSplitData.zstd.Close() + } + }() + + its, err := asm.NewInputTarStream(reader, tarSplitData.packer, nil) + if err != nil { + return err + } + + tr := tar.NewReader(its) tr.RawAccounting = true buf := make([]byte, 4096) @@ -212,7 +262,6 @@ func writeZstdChunkedStream(destFile io.Writer, outMetadata map[string]string, r defer func() { if zstdWriter != nil { zstdWriter.Close() - zstdWriter.Flush() } }() @@ -222,9 +271,6 @@ func writeZstdChunkedStream(destFile io.Writer, outMetadata map[string]string, r if err := zstdWriter.Close(); err != nil { return 0, err } - if err := zstdWriter.Flush(); err != nil { - return 0, err - } offset = dest.Count zstdWriter.Reset(dest) } @@ -371,9 +417,11 @@ func writeZstdChunkedStream(destFile io.Writer, outMetadata map[string]string, r rawBytes := tr.RawBytes() if _, err := zstdWriter.Write(rawBytes); err != nil { + zstdWriter.Close() return err } if err := zstdWriter.Flush(); err != nil { + zstdWriter.Close() return err } if err := zstdWriter.Close(); err != nil { @@ -381,7 +429,21 @@ func writeZstdChunkedStream(destFile io.Writer, outMetadata map[string]string, r } zstdWriter = nil - return internal.WriteZstdChunkedManifest(dest, outMetadata, uint64(dest.Count), metadata, level) + if err := tarSplitData.zstd.Flush(); err != nil { + return err + } + if err := tarSplitData.zstd.Close(); err != nil { + return err + } + tarSplitData.zstd = nil + + ts := internal.TarSplitData{ + Data: tarSplitData.compressed.Bytes(), + Digest: tarSplitData.digester.Digest(), + UncompressedSize: tarSplitData.uncompressedCounter.Count, + } + + return internal.WriteZstdChunkedManifest(dest, outMetadata, uint64(dest.Count), &ts, metadata, level) } type zstdChunkedWriter struct { diff --git a/vendor/github.com/containers/storage/pkg/chunked/compressor/rollsum.go b/vendor/github.com/containers/storage/pkg/chunked/compressor/rollsum.go index f4dfad822..59df6901e 100644 --- a/vendor/github.com/containers/storage/pkg/chunked/compressor/rollsum.go +++ 
b/vendor/github.com/containers/storage/pkg/chunked/compressor/rollsum.go @@ -25,11 +25,15 @@ import ( "math/bits" ) -const windowSize = 64 // Roll assumes windowSize is a power of 2 -const charOffset = 31 +const ( + windowSize = 64 // Roll assumes windowSize is a power of 2 + charOffset = 31 +) -const blobBits = 13 -const blobSize = 1 << blobBits // 8k +const ( + blobBits = 13 + blobSize = 1 << blobBits // 8k +) type RollSum struct { s1, s2 uint32 diff --git a/vendor/github.com/containers/storage/pkg/chunked/internal/compression.go b/vendor/github.com/containers/storage/pkg/chunked/internal/compression.go index 092b03533..49074eadf 100644 --- a/vendor/github.com/containers/storage/pkg/chunked/internal/compression.go +++ b/vendor/github.com/containers/storage/pkg/chunked/internal/compression.go @@ -90,6 +90,8 @@ func GetType(t byte) (string, error) { const ( ManifestChecksumKey = "io.github.containers.zstd-chunked.manifest-checksum" ManifestInfoKey = "io.github.containers.zstd-chunked.manifest-position" + TarSplitChecksumKey = "io.github.containers.zstd-chunked.tarsplit-checksum" + TarSplitInfoKey = "io.github.containers.zstd-chunked.tarsplit-position" // ManifestTypeCRFS is a manifest file compatible with the CRFS TOC file. ManifestTypeCRFS = 1 @@ -97,7 +99,7 @@ const ( // FooterSizeSupported is the footer size supported by this implementation. // Newer versions of the image format might increase this value, so reject // any version that is not supported. - FooterSizeSupported = 40 + FooterSizeSupported = 56 ) var ( @@ -125,16 +127,23 @@ func appendZstdSkippableFrame(dest io.Writer, data []byte) error { return nil } -func WriteZstdChunkedManifest(dest io.Writer, outMetadata map[string]string, offset uint64, metadata []FileMetadata, level int) error { +type TarSplitData struct { + Data []byte + Digest digest.Digest + UncompressedSize int64 +} + +func WriteZstdChunkedManifest(dest io.Writer, outMetadata map[string]string, offset uint64, tarSplitData *TarSplitData, metadata []FileMetadata, level int) error { // 8 is the size of the zstd skippable frame header + the frame size - manifestOffset := offset + 8 + const zstdSkippableFrameHeader = 8 + manifestOffset := offset + zstdSkippableFrameHeader toc := TOC{ Version: 1, Entries: metadata, } - var json = jsoniter.ConfigCompatibleWithStandardLibrary + json := jsoniter.ConfigCompatibleWithStandardLibrary // Generate the manifest manifest, err := json.Marshal(toc) if err != nil { @@ -167,13 +176,20 @@ func WriteZstdChunkedManifest(dest io.Writer, outMetadata map[string]string, off return err } + outMetadata[TarSplitChecksumKey] = tarSplitData.Digest.String() + tarSplitOffset := manifestOffset + uint64(len(compressedManifest)) + zstdSkippableFrameHeader + outMetadata[TarSplitInfoKey] = fmt.Sprintf("%d:%d:%d", tarSplitOffset, len(tarSplitData.Data), tarSplitData.UncompressedSize) + if err := appendZstdSkippableFrame(dest, tarSplitData.Data); err != nil { + return err + } + // Store the offset to the manifest and its size in LE order manifestDataLE := make([]byte, FooterSizeSupported) binary.LittleEndian.PutUint64(manifestDataLE, manifestOffset) - binary.LittleEndian.PutUint64(manifestDataLE[8:], uint64(len(compressedManifest))) - binary.LittleEndian.PutUint64(manifestDataLE[16:], uint64(len(manifest))) - binary.LittleEndian.PutUint64(manifestDataLE[24:], uint64(ManifestTypeCRFS)) - copy(manifestDataLE[32:], ZstdChunkedFrameMagic) + binary.LittleEndian.PutUint64(manifestDataLE[8*1:], uint64(len(compressedManifest))) + 
binary.LittleEndian.PutUint64(manifestDataLE[8*2:], uint64(len(manifest))) + binary.LittleEndian.PutUint64(manifestDataLE[8*3:], uint64(ManifestTypeCRFS)) + binary.LittleEndian.PutUint64(manifestDataLE[8*4:], tarSplitOffset) + binary.LittleEndian.PutUint64(manifestDataLE[8*5:], uint64(len(tarSplitData.Data))) + copy(manifestDataLE[8*6:], ZstdChunkedFrameMagic) return appendZstdSkippableFrame(dest, manifestDataLE) } diff --git a/vendor/github.com/containers/storage/pkg/chunked/storage_linux.go b/vendor/github.com/containers/storage/pkg/chunked/storage_linux.go index adc1ad398..a80b28fb5 100644 --- a/vendor/github.com/containers/storage/pkg/chunked/storage_linux.go +++ b/vendor/github.com/containers/storage/pkg/chunked/storage_linux.go @@ -55,6 +55,7 @@ type compressedFileType int type chunkedDiffer struct { stream ImageSourceSeekable manifest []byte + tarSplit []byte layersCache *layersCache tocOffset int64 fileType compressedFileType @@ -64,6 +65,8 @@ type chunkedDiffer struct { gzipReader *pgzip.Reader zstdReader *zstd.Decoder rawReader io.Reader + + tocDigest digest.Digest } var xattrsToIgnore = map[string]interface{}{ @@ -135,6 +138,26 @@ func copyFileContent(srcFd int, destFile string, dirfd int, mode os.FileMode, us return dstFile, st.Size(), nil } +// GetTOCDigest returns the digest of the TOC as recorded in the annotations. +// This is an experimental feature and may be changed/removed in the future. +func GetTOCDigest(annotations map[string]string) (*digest.Digest, error) { + if tocDigest, ok := annotations[estargz.TOCJSONDigestAnnotation]; ok { + d, err := digest.Parse(tocDigest) + if err != nil { + return nil, err + } + return &d, nil + } + if tocDigest, ok := annotations[internal.ManifestChecksumKey]; ok { + d, err := digest.Parse(tocDigest) + if err != nil { + return nil, err + } + return &d, nil + } + return nil, nil +} + // GetDiffer returns a differ that can be used with ApplyDiffWithDiffer.
func GetDiffer(ctx context.Context, store storage.Store, blobSize int64, annotations map[string]string, iss ImageSourceSeekable) (graphdriver.Differ, error) { if _, ok := annotations[internal.ManifestChecksumKey]; ok { @@ -147,7 +170,7 @@ func GetDiffer(ctx context.Context, store storage.Store, blobSize int64, annotat } func makeZstdChunkedDiffer(ctx context.Context, store storage.Store, blobSize int64, annotations map[string]string, iss ImageSourceSeekable) (*chunkedDiffer, error) { - manifest, tocOffset, err := readZstdChunkedManifest(ctx, iss, blobSize, annotations) + manifest, tarSplit, tocOffset, err := readZstdChunkedManifest(ctx, iss, blobSize, annotations) if err != nil { return nil, fmt.Errorf("read zstd:chunked manifest: %w", err) } @@ -156,13 +179,20 @@ func makeZstdChunkedDiffer(ctx context.Context, store storage.Store, blobSize in return nil, err } + tocDigest, err := digest.Parse(annotations[internal.ManifestChecksumKey]) + if err != nil { + return nil, fmt.Errorf("parse TOC digest %q: %w", annotations[internal.ManifestChecksumKey], err) + } + return &chunkedDiffer{ copyBuffer: makeCopyBuffer(), - stream: iss, - manifest: manifest, + fileType: fileTypeZstdChunked, layersCache: layersCache, + manifest: manifest, + stream: iss, + tarSplit: tarSplit, tocOffset: tocOffset, - fileType: fileTypeZstdChunked, + tocDigest: tocDigest, }, nil } @@ -176,6 +206,11 @@ func makeEstargzChunkedDiffer(ctx context.Context, store storage.Store, blobSize return nil, err } + tocDigest, err := digest.Parse(annotations[estargz.TOCJSONDigestAnnotation]) + if err != nil { + return nil, fmt.Errorf("parse TOC digest %q: %w", annotations[estargz.TOCJSONDigestAnnotation], err) + } + return &chunkedDiffer{ copyBuffer: makeCopyBuffer(), stream: iss, @@ -183,6 +218,7 @@ func makeEstargzChunkedDiffer(ctx context.Context, store storage.Store, blobSize layersCache: layersCache, tocOffset: tocOffset, fileType: fileTypeEstargz, + tocDigest: tocDigest, }, nil } @@ -363,6 +399,24 @@ func maybeDoIDRemap(manifest []internal.FileMetadata, options *archive.TarOption return nil } +func mapToSlice(inputMap map[uint32]struct{}) []uint32 { + var out []uint32 + for value := range inputMap { + out = append(out, value) + } + return out +} + +func collectIDs(entries []internal.FileMetadata) ([]uint32, []uint32) { + uids := make(map[uint32]struct{}) + gids := make(map[uint32]struct{}) + for _, entry := range entries { + uids[uint32(entry.UID)] = struct{}{} + gids[uint32(entry.GID)] = struct{}{} + } + return mapToSlice(uids), mapToSlice(gids) +} + type originFile struct { Root string Path string @@ -558,7 +612,7 @@ func openFileUnderRootFallback(dirfd int, name string, flags uint64, mode os.Fil func openFileUnderRootOpenat2(dirfd int, name string, flags uint64, mode os.FileMode) (int, error) { how := unix.OpenHow{ Flags: flags, - Mode: uint64(mode & 07777), + Mode: uint64(mode & 0o7777), Resolve: unix.RESOLVE_IN_ROOT, } return unix.Openat2(dirfd, name, &how) @@ -636,7 +690,7 @@ func openOrCreateDirUnderRoot(name string, dirfd int, mode os.FileMode) (*os.Fil baseName := filepath.Base(name) - if err2 := unix.Mkdirat(int(pDir.Fd()), baseName, 0755); err2 != nil { + if err2 := unix.Mkdirat(int(pDir.Fd()), baseName, 0o755); err2 != nil { return nil, err } @@ -1271,12 +1325,13 @@ func (c *chunkedDiffer) ApplyDiff(dest string, options *archive.TarOptions) (gra } }() - bigData := map[string][]byte{ - bigDataKey: c.manifest, - } output := graphdriver.DriverWithDifferOutput{ - Differ: c, - BigData: bigData, + Differ: c, + TarSplit: 
c.tarSplit, + BigData: map[string][]byte{ + bigDataKey: c.manifest, + }, + TOCDigest: c.tocDigest, } storeOpts, err := types.DefaultStoreOptionsAutoDetectUID() @@ -1305,6 +1360,8 @@ func (c *chunkedDiffer) ApplyDiff(dest string, options *archive.TarOptions) (gra var missingParts []missingPart + output.UIDs, output.GIDs = collectIDs(toc.Entries) + mergedEntries, totalSize, err := c.mergeTocEntries(c.fileType, toc.Entries) if err != nil { return output, err @@ -1384,7 +1441,7 @@ func (c *chunkedDiffer) ApplyDiff(dest string, options *archive.TarOptions) (gra filesToWaitFor := 0 for i, r := range mergedEntries { if options.ForceMask != nil { - value := fmt.Sprintf("%d:%d:0%o", r.UID, r.GID, r.Mode&07777) + value := fmt.Sprintf("%d:%d:0%o", r.UID, r.GID, r.Mode&0o7777) r.Xattrs[containersOverrideXattr] = base64.StdEncoding.EncodeToString([]byte(value)) r.Mode = int64(*options.ForceMask) } @@ -1579,6 +1636,7 @@ func (c *chunkedDiffer) ApplyDiff(dest string, options *archive.TarOptions) (gra if totalChunksSize > 0 { logrus.Debugf("Missing %d bytes out of %d (%.2f %%)", missingPartsSize, totalChunksSize, float32(missingPartsSize*100.0)/float32(totalChunksSize)) } + return output, nil } diff --git a/vendor/github.com/containers/storage/pkg/chunked/storage_unsupported.go b/vendor/github.com/containers/storage/pkg/chunked/storage_unsupported.go index 4d952aba3..cc37ab1d8 100644 --- a/vendor/github.com/containers/storage/pkg/chunked/storage_unsupported.go +++ b/vendor/github.com/containers/storage/pkg/chunked/storage_unsupported.go @@ -9,9 +9,16 @@ import ( storage "github.com/containers/storage" graphdriver "github.com/containers/storage/drivers" + digest "github.com/opencontainers/go-digest" ) // GetDiffer returns a differ that can be used with ApplyDiffWithDiffer. func GetDiffer(ctx context.Context, store storage.Store, blobSize int64, annotations map[string]string, iss ImageSourceSeekable) (graphdriver.Differ, error) { - return nil, errors.New("format not supported on this architecture") + return nil, errors.New("format not supported on this system") } + +// GetTOCDigest returns the digest of the TOC as recorded in the annotations. +// This is an experimental feature and may be changed/removed in the future. +func GetTOCDigest(annotations map[string]string) (*digest.Digest, error) { + return nil, errors.New("format not supported on this system") } diff --git a/vendor/github.com/containers/storage/pkg/config/config.go b/vendor/github.com/containers/storage/pkg/config/config.go index f6e0cfcfe..20d72ca89 100644 --- a/vendor/github.com/containers/storage/pkg/config/config.go +++ b/vendor/github.com/containers/storage/pkg/config/config.go @@ -124,6 +124,11 @@ type OptionsConfig struct { // for shared image content AdditionalImageStores []string `toml:"additionalimagestores,omitempty"` + // ImageStore is the location of the image store, which is separate from the + // container store. This is usually not recommended, unless users want a + // separate store for images and containers. + ImageStore string `toml:"imagestore,omitempty"` + // AdditionalLayerStores is the location of additional read/only // Layer stores.
Usually used to access Networked File System // for shared image content diff --git a/vendor/github.com/containers/storage/pkg/devicemapper/devmapper.go b/vendor/github.com/containers/storage/pkg/devicemapper/devmapper.go index 6b9a7afcd..33bf7184e 100644 --- a/vendor/github.com/containers/storage/pkg/devicemapper/devmapper.go +++ b/vendor/github.com/containers/storage/pkg/devicemapper/devmapper.go @@ -239,8 +239,8 @@ func (t *Task) getDriverVersion() (string, error) { } func (t *Task) getNextTarget(next unsafe.Pointer) (nextPtr unsafe.Pointer, start uint64, - length uint64, targetType string, params string) { - + length uint64, targetType string, params string, +) { return DmGetNextTarget(t.unmanaged, next, &start, &length, &targetType, ¶ms), start, length, targetType, params @@ -345,8 +345,7 @@ func RemoveDeviceDeferred(name string) error { // disable udev dm rules and delete the symlink under /dev/mapper by itself, // even if the removal is deferred by the kernel. cookie := new(uint) - var flags uint16 - flags = DmUdevDisableLibraryFallback + flags := uint16(DmUdevDisableLibraryFallback) if err := task.setCookie(cookie, flags); err != nil { return fmt.Errorf("devicemapper: Can not set cookie: %s", err) } @@ -384,7 +383,7 @@ func CancelDeferredRemove(deviceName string) error { return fmt.Errorf("devicemapper: Can't set sector %s", err) } - if err := task.setMessage(fmt.Sprintf("@cancel_deferred_remove")); err != nil { + if err := task.setMessage("@cancel_deferred_remove"); err != nil { return fmt.Errorf("devicemapper: Can't set message %s", err) } @@ -459,8 +458,7 @@ func CreatePool(poolName string, dataFile, metadataFile *os.File, poolBlockSize } cookie := new(uint) - var flags uint16 - flags = DmUdevDisableSubsystemRulesFlag | DmUdevDisableDiskRulesFlag | DmUdevDisableOtherRulesFlag + flags := uint16(DmUdevDisableSubsystemRulesFlag | DmUdevDisableDiskRulesFlag | DmUdevDisableOtherRulesFlag) if err := task.setCookie(cookie, flags); err != nil { return fmt.Errorf("devicemapper: Can't set cookie %s", err) } diff --git a/vendor/github.com/containers/storage/pkg/devicemapper/devmapper_wrapper.go b/vendor/github.com/containers/storage/pkg/devicemapper/devmapper_wrapper.go index 7baca8126..9aef4c2fb 100644 --- a/vendor/github.com/containers/storage/pkg/devicemapper/devmapper_wrapper.go +++ b/vendor/github.com/containers/storage/pkg/devicemapper/devmapper_wrapper.go @@ -138,8 +138,8 @@ func dmTaskSetRoFct(task *cdmTask) int { } func dmTaskAddTargetFct(task *cdmTask, - start, size uint64, ttype, params string) int { - + start, size uint64, ttype, params string, +) int { Cttype := C.CString(ttype) defer free(Cttype) @@ -156,12 +156,11 @@ func dmTaskGetDepsFct(task *cdmTask) *Deps { } // golang issue: https://github.com/golang/go/issues/11925 - hdr := reflect.SliceHeader{ - Data: uintptr(unsafe.Pointer(uintptr(unsafe.Pointer(Cdeps)) + unsafe.Sizeof(*Cdeps))), - Len: int(Cdeps.count), - Cap: int(Cdeps.count), - } - devices := *(*[]C.uint64_t)(unsafe.Pointer(&hdr)) + var devices []C.uint64_t + devicesHdr := (*reflect.SliceHeader)(unsafe.Pointer(&devices)) + devicesHdr.Data = uintptr(unsafe.Pointer(uintptr(unsafe.Pointer(Cdeps)) + unsafe.Sizeof(*Cdeps))) + devicesHdr.Len = int(Cdeps.count) + devicesHdr.Cap = int(Cdeps.count) deps := &Deps{ Count: uint32(Cdeps.count), diff --git a/vendor/github.com/containers/storage/pkg/fileutils/fileutils.go b/vendor/github.com/containers/storage/pkg/fileutils/fileutils.go index bcc2109b6..9d0714b1b 100644 --- 
a/vendor/github.com/containers/storage/pkg/fileutils/fileutils.go +++ b/vendor/github.com/containers/storage/pkg/fileutils/fileutils.go @@ -183,7 +183,6 @@ func (p *Pattern) Exclusion() bool { } func (p *Pattern) match(path string) (bool, error) { - if p.regexp == nil { if err := p.compile(); err != nil { return false, filepath.ErrBadPattern @@ -356,12 +355,12 @@ func CreateIfNotExists(path string, isDir bool) error { if _, err := os.Stat(path); err != nil { if os.IsNotExist(err) { if isDir { - return os.MkdirAll(path, 0755) + return os.MkdirAll(path, 0o755) } - if err := os.MkdirAll(filepath.Dir(path), 0755); err != nil { + if err := os.MkdirAll(filepath.Dir(path), 0o755); err != nil { return err } - f, err := os.OpenFile(path, os.O_CREATE, 0755) + f, err := os.OpenFile(path, os.O_CREATE, 0o755) if err != nil { return err } diff --git a/vendor/github.com/containers/storage/pkg/idmap/idmapped_utils.go b/vendor/github.com/containers/storage/pkg/idmap/idmapped_utils.go index f52239f87..68c8c867d 100644 --- a/vendor/github.com/containers/storage/pkg/idmap/idmapped_utils.go +++ b/vendor/github.com/containers/storage/pkg/idmap/idmapped_utils.go @@ -104,7 +104,7 @@ func CreateIDMappedMount(source, target string, pid int) error { &attr, uint(unsafe.Sizeof(attr))); err != nil { return err } - if err := os.Mkdir(target, 0700); err != nil && !os.IsExist(err) { + if err := os.Mkdir(target, 0o700); err != nil && !os.IsExist(err) { return err } return moveMount(targetDirFd, target) @@ -140,7 +140,7 @@ func CreateUsernsProcess(uidMaps []idtools.IDMap, gidMaps []idtools.IDMap) (int, for _, m := range idmap { mappings = mappings + fmt.Sprintf("%d %d %d\n", m.ContainerID, m.HostID, m.Size) } - return os.WriteFile(fmt.Sprintf("/proc/%d/%s", pid, fname), []byte(mappings), 0600) + return os.WriteFile(fmt.Sprintf("/proc/%d/%s", pid, fname), []byte(mappings), 0o600) } if err := writeMappings("uid_map", uidMaps); err != nil { cleanupFunc() diff --git a/vendor/github.com/containers/storage/pkg/idtools/idtools_unix.go b/vendor/github.com/containers/storage/pkg/idtools/idtools_unix.go index daff1e4a9..4701dc5ac 100644 --- a/vendor/github.com/containers/storage/pkg/idtools/idtools_unix.go +++ b/vendor/github.com/containers/storage/pkg/idtools/idtools_unix.go @@ -91,13 +91,13 @@ func CanAccess(path string, pair IDPair) bool { } func accessible(isOwner, isGroup bool, perms os.FileMode) bool { - if isOwner && (perms&0100 == 0100) { + if isOwner && (perms&0o100 == 0o100) { return true } - if isGroup && (perms&0010 == 0010) { + if isGroup && (perms&0o010 == 0o010) { return true } - if perms&0001 == 0001 { + if perms&0o001 == 0o001 { return true } return false diff --git a/vendor/github.com/containers/storage/pkg/idtools/usergroupadd_linux.go b/vendor/github.com/containers/storage/pkg/idtools/usergroupadd_linux.go index 40e507f77..ac27718de 100644 --- a/vendor/github.com/containers/storage/pkg/idtools/usergroupadd_linux.go +++ b/vendor/github.com/containers/storage/pkg/idtools/usergroupadd_linux.go @@ -89,7 +89,6 @@ func addUser(userName string) error { } func createSubordinateRanges(name string) error { - // first, we should verify that ranges weren't automatically created // by the distro tooling ranges, err := readSubuid(name) diff --git a/vendor/github.com/containers/storage/pkg/idtools/utils_unix.go b/vendor/github.com/containers/storage/pkg/idtools/utils_unix.go index 33a7dee6c..b3772bdb3 100644 --- a/vendor/github.com/containers/storage/pkg/idtools/utils_unix.go +++ 
b/vendor/github.com/containers/storage/pkg/idtools/utils_unix.go @@ -19,8 +19,8 @@ func resolveBinary(binname string) (string, error) { if err != nil { return "", err } - //only return no error if the final resolved binary basename - //matches what was searched for + // only return no error if the final resolved binary basename + // matches what was searched for if filepath.Base(resolvedPath) == binname { return resolvedPath, nil } diff --git a/vendor/github.com/containers/storage/pkg/lockfile/lockfile_unix.go b/vendor/github.com/containers/storage/pkg/lockfile/lockfile_unix.go index 335980914..a357b809e 100644 --- a/vendor/github.com/containers/storage/pkg/lockfile/lockfile_unix.go +++ b/vendor/github.com/containers/storage/pkg/lockfile/lockfile_unix.go @@ -135,7 +135,7 @@ func openLock(path string, ro bool) (fd int, err error) { // the directory of the lockfile seems to be removed, try to create it if os.IsNotExist(err) { - if err := os.MkdirAll(filepath.Dir(path), 0700); err != nil { + if err := os.MkdirAll(filepath.Dir(path), 0o700); err != nil { return fd, fmt.Errorf("creating lock file directory: %w", err) } diff --git a/vendor/github.com/containers/storage/pkg/lockfile/lockfile_windows.go b/vendor/github.com/containers/storage/pkg/lockfile/lockfile_windows.go index 09f2aca5c..ca27a483d 100644 --- a/vendor/github.com/containers/storage/pkg/lockfile/lockfile_windows.go +++ b/vendor/github.com/containers/storage/pkg/lockfile/lockfile_windows.go @@ -138,6 +138,7 @@ func (l *LockFile) Modified() (bool, error) { func (l *LockFile) Touch() error { return nil } + func (l *LockFile) IsReadWrite() bool { return false } diff --git a/vendor/github.com/containers/storage/pkg/loopback/attach_loopback.go b/vendor/github.com/containers/storage/pkg/loopback/attach_loopback.go index de10e3324..b8bfa5897 100644 --- a/vendor/github.com/containers/storage/pkg/loopback/attach_loopback.go +++ b/vendor/github.com/containers/storage/pkg/loopback/attach_loopback.go @@ -26,7 +26,7 @@ func stringToLoopName(src string) [LoNameSize]uint8 { } func getNextFreeLoopbackIndex() (int, error) { - f, err := os.OpenFile("/dev/loop-control", os.O_RDONLY, 0644) + f, err := os.OpenFile("/dev/loop-control", os.O_RDONLY, 0o644) if err != nil { return 0, err } @@ -67,7 +67,7 @@ func openNextAvailableLoopback(index int, sparseName string, sparseFile *os.File } // OpenFile adds O_CLOEXEC - loopFile, err = os.OpenFile(target, os.O_RDWR, 0644) + loopFile, err = os.OpenFile(target, os.O_RDWR, 0o644) if err != nil { logrus.Errorf("Opening loopback device: %s", err) return nil, ErrAttachLoopbackDevice @@ -114,7 +114,6 @@ func openNextAvailableLoopback(index int, sparseName string, sparseFile *os.File // AttachLoopDevice attaches the given sparse file to the next // available loopback device. It returns an opened *os.File. func AttachLoopDevice(sparseName string) (loop *os.File, err error) { - // Try to retrieve the next available loopback device via syscall. // If it fails, we discard error and start looping for a // loopback from index 0. 
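Before falling back to scanning indexes from 0, AttachLoopDevice asks the kernel directly for a free slot. A minimal sketch of that first step, assuming Linux and golang.org/x/sys/unix; the vendored helper wraps the same /dev/loop-control query, though its exact ioctl plumbing is not shown in this hunk:

package main

import (
	"fmt"
	"os"

	"golang.org/x/sys/unix"
)

// nextFreeLoopIndex returns the index of the first unused loop device
// (e.g. 3 for /dev/loop3) by querying the loop-control device.
func nextFreeLoopIndex() (int, error) {
	f, err := os.OpenFile("/dev/loop-control", os.O_RDONLY, 0o644)
	if err != nil {
		return 0, err
	}
	defer f.Close()
	return unix.IoctlRetInt(int(f.Fd()), unix.LOOP_CTL_GET_FREE)
}

func main() {
	idx, err := nextFreeLoopIndex()
	if err != nil {
		fmt.Fprintln(os.Stderr, "loop-control:", err)
		os.Exit(1)
	}
	fmt.Printf("next free device: /dev/loop%d\n", idx)
}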
@@ -124,7 +123,7 @@ func AttachLoopDevice(sparseName string) (loop *os.File, err error) { } // OpenFile adds O_CLOEXEC - sparseFile, err := os.OpenFile(sparseName, os.O_RDWR, 0644) + sparseFile, err := os.OpenFile(sparseName, os.O_RDWR, 0o644) if err != nil { logrus.Errorf("Opening sparse file: %v", err) return nil, ErrAttachLoopbackDevice diff --git a/vendor/github.com/containers/storage/pkg/parsers/kernel/kernel_windows.go b/vendor/github.com/containers/storage/pkg/parsers/kernel/kernel_windows.go index b30da9fad..4b7fdee83 100644 --- a/vendor/github.com/containers/storage/pkg/parsers/kernel/kernel_windows.go +++ b/vendor/github.com/containers/storage/pkg/parsers/kernel/kernel_windows.go @@ -24,7 +24,6 @@ func (k *VersionInfo) String() string { // GetKernelVersion gets the current kernel version. func GetKernelVersion() (*VersionInfo, error) { - var ( h windows.Handle dwVersion uint32 diff --git a/vendor/github.com/containers/storage/pkg/regexp/regexp.go b/vendor/github.com/containers/storage/pkg/regexp/regexp.go index 458b83378..5b6a3f5e7 100644 --- a/vendor/github.com/containers/storage/pkg/regexp/regexp.go +++ b/vendor/github.com/containers/storage/pkg/regexp/regexp.go @@ -10,14 +10,17 @@ import ( // used as global variables. Using this structure helps speed the startup time // of apps that want to use global regex variables. This library initializes them on // first use as opposed to the start of the executable. -type Regexp struct { +type Regexp = *regexpStruct + +type regexpStruct struct { + _ noCopy once sync.Once regexp *regexp.Regexp val string } func Delayed(val string) Regexp { - re := Regexp{ + re := ®expStruct{ val: val, } if precompile { @@ -26,7 +29,7 @@ func Delayed(val string) Regexp { return re } -func (re *Regexp) compile() { +func (re *regexpStruct) compile() { if precompile { return } @@ -35,180 +38,195 @@ func (re *Regexp) compile() { }) } -func (re *Regexp) Expand(dst []byte, template []byte, src []byte, match []int) []byte { +func (re *regexpStruct) Expand(dst []byte, template []byte, src []byte, match []int) []byte { re.compile() return re.regexp.Expand(dst, template, src, match) } -func (re *Regexp) ExpandString(dst []byte, template string, src string, match []int) []byte { +func (re *regexpStruct) ExpandString(dst []byte, template string, src string, match []int) []byte { re.compile() return re.regexp.ExpandString(dst, template, src, match) } -func (re *Regexp) Find(b []byte) []byte { + +func (re *regexpStruct) Find(b []byte) []byte { re.compile() return re.regexp.Find(b) } -func (re *Regexp) FindAll(b []byte, n int) [][]byte { +func (re *regexpStruct) FindAll(b []byte, n int) [][]byte { re.compile() return re.regexp.FindAll(b, n) } -func (re *Regexp) FindAllIndex(b []byte, n int) [][]int { +func (re *regexpStruct) FindAllIndex(b []byte, n int) [][]int { re.compile() return re.regexp.FindAllIndex(b, n) } -func (re *Regexp) FindAllString(s string, n int) []string { +func (re *regexpStruct) FindAllString(s string, n int) []string { re.compile() return re.regexp.FindAllString(s, n) } -func (re *Regexp) FindAllStringIndex(s string, n int) [][]int { +func (re *regexpStruct) FindAllStringIndex(s string, n int) [][]int { re.compile() return re.regexp.FindAllStringIndex(s, n) } -func (re *Regexp) FindAllStringSubmatch(s string, n int) [][]string { +func (re *regexpStruct) FindAllStringSubmatch(s string, n int) [][]string { re.compile() return re.regexp.FindAllStringSubmatch(s, n) } -func (re *Regexp) FindAllStringSubmatchIndex(s string, n int) [][]int { +func (re 
*regexpStruct) FindAllStringSubmatchIndex(s string, n int) [][]int { re.compile() return re.regexp.FindAllStringSubmatchIndex(s, n) } -func (re *Regexp) FindAllSubmatch(b []byte, n int) [][][]byte { +func (re *regexpStruct) FindAllSubmatch(b []byte, n int) [][][]byte { re.compile() return re.regexp.FindAllSubmatch(b, n) } -func (re *Regexp) FindAllSubmatchIndex(b []byte, n int) [][]int { +func (re *regexpStruct) FindAllSubmatchIndex(b []byte, n int) [][]int { re.compile() return re.regexp.FindAllSubmatchIndex(b, n) } -func (re *Regexp) FindIndex(b []byte) (loc []int) { +func (re *regexpStruct) FindIndex(b []byte) (loc []int) { re.compile() return re.regexp.FindIndex(b) } -func (re *Regexp) FindReaderIndex(r io.RuneReader) (loc []int) { +func (re *regexpStruct) FindReaderIndex(r io.RuneReader) (loc []int) { re.compile() return re.regexp.FindReaderIndex(r) } -func (re *Regexp) FindReaderSubmatchIndex(r io.RuneReader) []int { +func (re *regexpStruct) FindReaderSubmatchIndex(r io.RuneReader) []int { re.compile() return re.regexp.FindReaderSubmatchIndex(r) } -func (re *Regexp) FindString(s string) string { +func (re *regexpStruct) FindString(s string) string { re.compile() return re.regexp.FindString(s) } -func (re *Regexp) FindStringIndex(s string) (loc []int) { +func (re *regexpStruct) FindStringIndex(s string) (loc []int) { re.compile() return re.regexp.FindStringIndex(s) } -func (re *Regexp) FindStringSubmatch(s string) []string { +func (re *regexpStruct) FindStringSubmatch(s string) []string { re.compile() return re.regexp.FindStringSubmatch(s) } -func (re *Regexp) FindStringSubmatchIndex(s string) []int { +func (re *regexpStruct) FindStringSubmatchIndex(s string) []int { re.compile() return re.regexp.FindStringSubmatchIndex(s) } -func (re *Regexp) FindSubmatch(b []byte) [][]byte { +func (re *regexpStruct) FindSubmatch(b []byte) [][]byte { re.compile() return re.regexp.FindSubmatch(b) } -func (re *Regexp) FindSubmatchIndex(b []byte) []int { +func (re *regexpStruct) FindSubmatchIndex(b []byte) []int { re.compile() return re.regexp.FindSubmatchIndex(b) } -func (re *Regexp) LiteralPrefix() (prefix string, complete bool) { +func (re *regexpStruct) LiteralPrefix() (prefix string, complete bool) { re.compile() return re.regexp.LiteralPrefix() } -func (re *Regexp) Longest() { +func (re *regexpStruct) Longest() { re.compile() re.regexp.Longest() } -func (re *Regexp) Match(b []byte) bool { +func (re *regexpStruct) Match(b []byte) bool { re.compile() return re.regexp.Match(b) } -func (re *Regexp) MatchReader(r io.RuneReader) bool { +func (re *regexpStruct) MatchReader(r io.RuneReader) bool { re.compile() return re.regexp.MatchReader(r) } -func (re *Regexp) MatchString(s string) bool { + +func (re *regexpStruct) MatchString(s string) bool { re.compile() return re.regexp.MatchString(s) } -func (re *Regexp) NumSubexp() int { +func (re *regexpStruct) NumSubexp() int { re.compile() return re.regexp.NumSubexp() } -func (re *Regexp) ReplaceAll(src, repl []byte) []byte { +func (re *regexpStruct) ReplaceAll(src, repl []byte) []byte { re.compile() return re.regexp.ReplaceAll(src, repl) } -func (re *Regexp) ReplaceAllFunc(src []byte, repl func([]byte) []byte) []byte { +func (re *regexpStruct) ReplaceAllFunc(src []byte, repl func([]byte) []byte) []byte { re.compile() return re.regexp.ReplaceAllFunc(src, repl) } -func (re *Regexp) ReplaceAllLiteral(src, repl []byte) []byte { +func (re *regexpStruct) ReplaceAllLiteral(src, repl []byte) []byte { re.compile() return re.regexp.ReplaceAllLiteral(src, repl) } -func 
(re *Regexp) ReplaceAllLiteralString(src, repl string) string { +func (re *regexpStruct) ReplaceAllLiteralString(src, repl string) string { re.compile() return re.regexp.ReplaceAllLiteralString(src, repl) } -func (re *Regexp) ReplaceAllString(src, repl string) string { +func (re *regexpStruct) ReplaceAllString(src, repl string) string { re.compile() return re.regexp.ReplaceAllString(src, repl) } -func (re *Regexp) ReplaceAllStringFunc(src string, repl func(string) string) string { +func (re *regexpStruct) ReplaceAllStringFunc(src string, repl func(string) string) string { re.compile() return re.regexp.ReplaceAllStringFunc(src, repl) } -func (re *Regexp) Split(s string, n int) []string { +func (re *regexpStruct) Split(s string, n int) []string { re.compile() return re.regexp.Split(s, n) } -func (re *Regexp) String() string { +func (re *regexpStruct) String() string { re.compile() return re.regexp.String() } -func (re *Regexp) SubexpIndex(name string) int { +func (re *regexpStruct) SubexpIndex(name string) int { re.compile() return re.regexp.SubexpIndex(name) } -func (re *Regexp) SubexpNames() []string { +func (re *regexpStruct) SubexpNames() []string { re.compile() return re.regexp.SubexpNames() } + +// noCopy may be added to structs which must not be copied +// after the first use. +// +// See https://golang.org/issues/8005#issuecomment-190753527 +// for details. +// +// Note that it must not be embedded, due to the Lock and Unlock methods. +type noCopy struct{} + +// Lock is a no-op used by -copylocks checker from `go vet`. +func (*noCopy) Lock() {} +func (*noCopy) Unlock() {} diff --git a/vendor/github.com/containers/storage/pkg/stringid/stringid.go b/vendor/github.com/containers/storage/pkg/stringid/stringid.go index 3ae44fd8a..20abc7407 100644 --- a/vendor/github.com/containers/storage/pkg/stringid/stringid.go +++ b/vendor/github.com/containers/storage/pkg/stringid/stringid.go @@ -63,7 +63,7 @@ func generateID(r io.Reader) string { } } -// GenerateRandomID returns a unique id. +// GenerateRandomID returns a pseudorandom 64-character hex string. func GenerateRandomID() string { return generateID(cryptorand.Reader) } diff --git a/vendor/github.com/containers/storage/pkg/system/errors.go b/vendor/github.com/containers/storage/pkg/system/errors.go index 288318985..b87d419b5 100644 --- a/vendor/github.com/containers/storage/pkg/system/errors.go +++ b/vendor/github.com/containers/storage/pkg/system/errors.go @@ -4,7 +4,5 @@ import ( "errors" ) -var ( - // ErrNotSupportedPlatform means the platform is not supported. - ErrNotSupportedPlatform = errors.New("platform and architecture is not supported") -) +// ErrNotSupportedPlatform means the platform is not supported. 
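The noCopy marker introduced above is what makes the Regexp-to-pointer change safe to police mechanically: go vet's copylocks pass flags any value copy of a struct that contains a field with pointer-receiver Lock/Unlock methods. A self-contained illustration of the pattern; lazyThing is a hypothetical stand-in for regexpStruct, not part of the vendored package:

package main

import "fmt"

type noCopy struct{}

// Lock and Unlock make noCopy look like a sync.Locker, which is all
// that `go vet -copylocks` checks for; both are deliberate no-ops.
func (*noCopy) Lock()   {}
func (*noCopy) Unlock() {}

// lazyThing carries a blank noCopy field purely for vet's benefit.
type lazyThing struct {
	_   noCopy
	val string
}

func main() {
	a := lazyThing{val: "compiled once"}
	b := a // go vet reports here: assignment copies lock value
	fmt.Println(b.val)
}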
+var ErrNotSupportedPlatform = errors.New("platform and architecture is not supported") diff --git a/vendor/github.com/containers/storage/pkg/system/init_windows.go b/vendor/github.com/containers/storage/pkg/system/init_windows.go index 019c66441..5f6fea1d3 100644 --- a/vendor/github.com/containers/storage/pkg/system/init_windows.go +++ b/vendor/github.com/containers/storage/pkg/system/init_windows.go @@ -13,5 +13,4 @@ func init() { if os.Getenv("LCOW_SUPPORTED") != "" { lcowSupported = true } - } diff --git a/vendor/github.com/containers/storage/pkg/system/meminfo_solaris.go b/vendor/github.com/containers/storage/pkg/system/meminfo_solaris.go index df53c40e2..a90b23e03 100644 --- a/vendor/github.com/containers/storage/pkg/system/meminfo_solaris.go +++ b/vendor/github.com/containers/storage/pkg/system/meminfo_solaris.go @@ -84,7 +84,6 @@ func getFreeMem() int64 { // // MemInfo type. func ReadMemInfo() (*MemInfo, error) { - ppKernel := C.getPpKernel() MemTotal := getTotalMem() MemFree := getFreeMem() diff --git a/vendor/github.com/containers/storage/pkg/system/path.go b/vendor/github.com/containers/storage/pkg/system/path.go index f634a6be6..ca076f2bc 100644 --- a/vendor/github.com/containers/storage/pkg/system/path.go +++ b/vendor/github.com/containers/storage/pkg/system/path.go @@ -17,5 +17,4 @@ func DefaultPathEnv(platform string) string { return "" } return defaultUnixPathEnv - } diff --git a/vendor/github.com/containers/storage/pkg/system/rm.go b/vendor/github.com/containers/storage/pkg/system/rm.go index 60c7d8bd9..5917fa251 100644 --- a/vendor/github.com/containers/storage/pkg/system/rm.go +++ b/vendor/github.com/containers/storage/pkg/system/rm.go @@ -30,6 +30,12 @@ func EnsureRemoveAll(dir string) error { exitOnErr := make(map[string]int) maxRetry := 100 + // Attempt a simple remove all first; this avoids the more expensive + // RecursiveUnmount call if not needed. 
+ if err := os.RemoveAll(dir); err == nil { + return nil + } + // Attempt to unmount anything beneath this dir first if err := mount.RecursiveUnmount(dir); err != nil { logrus.Debugf("RecursiveUnmount on %s failed: %v", dir, err) } diff --git a/vendor/github.com/containers/storage/pkg/system/stat_common.go b/vendor/github.com/containers/storage/pkg/system/stat_common.go index e965c54c2..2f44d18b6 100644 --- a/vendor/github.com/containers/storage/pkg/system/stat_common.go +++ b/vendor/github.com/containers/storage/pkg/system/stat_common.go @@ -3,8 +3,7 @@ package system -type platformStatT struct { -} +type platformStatT struct{} // Flags returns file flags if supported or zero otherwise func (s StatT) Flags() uint32 { diff --git a/vendor/github.com/containers/storage/pkg/system/stat_darwin.go b/vendor/github.com/containers/storage/pkg/system/stat_darwin.go index 715f05b93..57850a883 100644 --- a/vendor/github.com/containers/storage/pkg/system/stat_darwin.go +++ b/vendor/github.com/containers/storage/pkg/system/stat_darwin.go @@ -4,10 +4,12 @@ import "syscall" // fromStatT converts a syscall.Stat_t type to a system.Stat_t type func fromStatT(s *syscall.Stat_t) (*StatT, error) { - return &StatT{size: s.Size, + return &StatT{ + size: s.Size, mode: uint32(s.Mode), uid: s.Uid, gid: s.Gid, rdev: uint64(s.Rdev), - mtim: s.Mtimespec}, nil + mtim: s.Mtimespec, + }, nil } diff --git a/vendor/github.com/containers/storage/pkg/system/stat_freebsd.go b/vendor/github.com/containers/storage/pkg/system/stat_freebsd.go index 9c510468f..4b95073a3 100644 --- a/vendor/github.com/containers/storage/pkg/system/stat_freebsd.go +++ b/vendor/github.com/containers/storage/pkg/system/stat_freebsd.go @@ -13,13 +13,15 @@ func (s StatT) Flags() uint32 { // fromStatT converts a syscall.Stat_t type to a system.Stat_t type func fromStatT(s *syscall.Stat_t) (*StatT, error) { - st := &StatT{size: s.Size, + st := &StatT{ + size: s.Size, mode: uint32(s.Mode), uid: s.Uid, gid: s.Gid, rdev: uint64(s.Rdev), mtim: s.Mtimespec, - dev: s.Dev} + dev: s.Dev, + } st.flags = s.Flags st.dev = s.Dev return st, nil diff --git a/vendor/github.com/containers/storage/pkg/system/stat_linux.go b/vendor/github.com/containers/storage/pkg/system/stat_linux.go index e5dcba822..e3d13463f 100644 --- a/vendor/github.com/containers/storage/pkg/system/stat_linux.go +++ b/vendor/github.com/containers/storage/pkg/system/stat_linux.go @@ -4,13 +4,15 @@ import "syscall" // fromStatT converts a syscall.Stat_t type to a system.Stat_t type func fromStatT(s *syscall.Stat_t) (*StatT, error) { - return &StatT{size: s.Size, + return &StatT{ + size: s.Size, mode: s.Mode, uid: s.Uid, gid: s.Gid, rdev: uint64(s.Rdev), mtim: s.Mtim, - dev: uint64(s.Dev)}, nil + dev: uint64(s.Dev), + }, nil } // FromStatT converts a syscall.Stat_t type to a system.Stat_t type diff --git a/vendor/github.com/containers/storage/pkg/system/stat_openbsd.go b/vendor/github.com/containers/storage/pkg/system/stat_openbsd.go index b607dea94..a413e1714 100644 --- a/vendor/github.com/containers/storage/pkg/system/stat_openbsd.go +++ b/vendor/github.com/containers/storage/pkg/system/stat_openbsd.go @@ -4,10 +4,12 @@ import "syscall" // fromStatT converts a syscall.Stat_t type to a system.Stat_t type func fromStatT(s *syscall.Stat_t) (*StatT, error) { - return &StatT{size: s.Size, + return &StatT{ + size: s.Size, mode: uint32(s.Mode), uid: s.Uid, gid: s.Gid, rdev: uint64(s.Rdev), - mtim: s.Mtim}, nil + mtim: s.Mtim, + }, nil } diff --git 
a/vendor/github.com/containers/storage/pkg/system/stat_solaris.go b/vendor/github.com/containers/storage/pkg/system/stat_solaris.go index b607dea94..a413e1714 100644 --- a/vendor/github.com/containers/storage/pkg/system/stat_solaris.go +++ b/vendor/github.com/containers/storage/pkg/system/stat_solaris.go @@ -4,10 +4,12 @@ import "syscall" // fromStatT converts a syscall.Stat_t type to a system.Stat_t type func fromStatT(s *syscall.Stat_t) (*StatT, error) { - return &StatT{size: s.Size, + return &StatT{ + size: s.Size, mode: uint32(s.Mode), uid: s.Uid, gid: s.Gid, rdev: uint64(s.Rdev), - mtim: s.Mtim}, nil + mtim: s.Mtim, + }, nil } diff --git a/vendor/github.com/containers/storage/pkg/system/stat_windows.go b/vendor/github.com/containers/storage/pkg/system/stat_windows.go index 81edaadbb..6d5c6c142 100644 --- a/vendor/github.com/containers/storage/pkg/system/stat_windows.go +++ b/vendor/github.com/containers/storage/pkg/system/stat_windows.go @@ -65,5 +65,6 @@ func fromStatT(fi *os.FileInfo) (*StatT, error) { return &StatT{ size: (*fi).Size(), mode: (*fi).Mode(), - mtim: (*fi).ModTime()}, nil + mtim: (*fi).ModTime(), + }, nil } diff --git a/vendor/github.com/containers/storage/pkg/truncindex/truncindex.go b/vendor/github.com/containers/storage/pkg/truncindex/truncindex.go index b81793adc..c14a5cc4d 100644 --- a/vendor/github.com/containers/storage/pkg/truncindex/truncindex.go +++ b/vendor/github.com/containers/storage/pkg/truncindex/truncindex.go @@ -102,9 +102,7 @@ func (idx *TruncIndex) Get(s string) (string, error) { if s == "" { return "", ErrEmptyPrefix } - var ( - id string - ) + var id string subTreeVisitFunc := func(prefix patricia.Prefix, item patricia.Item) error { if id != "" { // we haven't found the ID if there are two or more IDs diff --git a/vendor/github.com/containers/storage/storage.conf b/vendor/github.com/containers/storage/storage.conf index 99cc94a36..93a9a236e 100644 --- a/vendor/github.com/containers/storage/storage.conf +++ b/vendor/github.com/containers/storage/storage.conf @@ -55,7 +55,7 @@ additionalimagestores = [ # can deduplicate pulling of content, disk storage of content and can allow the # kernel to use less memory when running containers. -# containers/storage supports four keys +# containers/storage supports three keys # * enable_partial_images="true" | "false" # Tells containers/storage to look for files previously pulled in storage # rather than always pulling them from the container registry. @@ -75,8 +75,8 @@ pull_options = {enable_partial_images = "false", use_hard_links = "false", ostre # mappings which the kernel will allow when you later attempt to run a # container. # -# remap-uids = 0:1668442479:65536 -# remap-gids = 0:1668442479:65536 +# remap-uids = "0:1668442479:65536" +# remap-gids = "0:1668442479:65536" # Remap-User/Group is a user name which can be used to look up one or more UID/GID # ranges in the /etc/subuid or /etc/subgid file. Mappings are set up starting @@ -84,7 +84,8 @@ pull_options = {enable_partial_images = "false", use_hard_links = "false", ostre # range that matches the specified name, and using the length of that range. # Additional ranges are then assigned, using the ranges which specify the # lowest host-level IDs first, to the lowest not-yet-mapped in-container ID, -# until all of the entries have been used for maps. +# until all of the entries have been used for maps. This setting overrides the +# Remap-UIDs/GIDs setting. 
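Each remap triple is container-ID:host-ID:length, and quoting it keeps TOML from parsing the value as anything but a single string. A small sketch, assuming idtools.ParseIDMap in this release keeps its usual signature (the second argument only labels errors), of how the quoted value becomes ID mappings:

package main

import (
	"fmt"

	"github.com/containers/storage/pkg/idtools"
)

func main() {
	// "0:1668442479:65536": container IDs 0..65535 map onto host IDs
	// starting at 1668442479.
	mappings, err := idtools.ParseIDMap([]string{"0:1668442479:65536"}, "remap-uids")
	if err != nil {
		panic(err)
	}
	for _, m := range mappings {
		fmt.Printf("container %d -> host %d (length %d)\n", m.ContainerID, m.HostID, m.Size)
	}
}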
# # remap-user = "containers" # remap-group = "containers" @@ -100,7 +101,7 @@ pull_options = {enable_partial_images = "false", use_hard_links = "false", ostre # Auto-userns-min-size is the minimum size for a user namespace created automatically. # auto-userns-min-size=1024 # -# Auto-userns-max-size is the minimum size for a user namespace created automatically. +# Auto-userns-max-size is the maximum size for a user namespace created automatically. # auto-userns-max-size=65536 [storage.options.overlay] diff --git a/vendor/github.com/containers/storage/store.go b/vendor/github.com/containers/storage/store.go index 4c4082084..14c1edd7f 100644 --- a/vendor/github.com/containers/storage/store.go +++ b/vendor/github.com/containers/storage/store.go @@ -520,6 +520,13 @@ type Store interface { // references in the json files. These can happen in the case of unclean // shutdowns or regular restarts in transient store mode. GarbageCollect() error + + // Check returns a report of things that look wrong in the store. + Check(options *CheckOptions) (CheckReport, error) + // Repair attempts to remediate problems mentioned in the CheckReport, + // usually by deleting layers and images which are damaged. If the + // right options are set, it will remove containers as well. + Repair(report CheckReport, options *RepairOptions) []error } // AdditionalLayer represents a layer that is contained in the additional layer store @@ -661,6 +668,7 @@ type store struct { usernsLock *lockfile.LockFile graphRoot string graphOptions []string + imageStoreDir string pullOptions map[string]string uidMap []idtools.IDMap gidMap []idtools.IDMap @@ -668,6 +676,7 @@ type store struct { autoNsMinSize uint32 autoNsMaxSize uint32 imageStore rwImageStore + rwImageStores []rwImageStore roImageStores []roImageStore containerStore rwContainerStore digestLockRoot string @@ -749,15 +758,25 @@ func GetStore(options types.StoreOptions) (Store, error) { options.RunRoot = defaultOpts.RunRoot } - if err := os.MkdirAll(options.RunRoot, 0700); err != nil { + if err := os.MkdirAll(options.RunRoot, 0o700); err != nil { return nil, err } - if err := os.MkdirAll(options.GraphRoot, 0700); err != nil { + if err := os.MkdirAll(options.GraphRoot, 0o700); err != nil { return nil, err } - if err := os.MkdirAll(filepath.Join(options.GraphRoot, options.GraphDriverName), 0700); err != nil { + if options.ImageStore != "" { + if err := os.MkdirAll(options.ImageStore, 0o700); err != nil { + return nil, err + } + } + if err := os.MkdirAll(filepath.Join(options.GraphRoot, options.GraphDriverName), 0o700); err != nil { return nil, err } + if options.ImageStore != "" { + if err := os.MkdirAll(filepath.Join(options.ImageStore, options.GraphDriverName), 0o700); err != nil { + return nil, err + } + } graphLock, err := lockfile.GetLockFile(filepath.Join(options.GraphRoot, "storage.lock")) if err != nil { @@ -785,6 +804,7 @@ func GetStore(options types.StoreOptions) (Store, error) { usernsLock: usernsLock, graphRoot: options.GraphRoot, graphOptions: options.GraphDriverOptions, + imageStoreDir: options.ImageStore, pullOptions: options.PullOptions, uidMap: copyIDMap(options.UIDMap), gidMap: copyIDMap(options.GIDMap), @@ -889,8 +909,12 @@ func (s *store) load() error { } driverPrefix := s.graphDriverName + "-" - gipath := filepath.Join(s.graphRoot, driverPrefix+"images") - if err := os.MkdirAll(gipath, 0700); err != nil { + imgStoreRoot := s.imageStoreDir + if imgStoreRoot == "" { + imgStoreRoot = s.graphRoot + } + gipath := filepath.Join(imgStoreRoot, 
driverPrefix+"images") + if err := os.MkdirAll(gipath, 0o700); err != nil { return err } ris, err := newImageStore(gipath) @@ -900,11 +924,11 @@ func (s *store) load() error { s.imageStore = ris gcpath := filepath.Join(s.graphRoot, driverPrefix+"containers") - if err := os.MkdirAll(gcpath, 0700); err != nil { + if err := os.MkdirAll(gcpath, 0o700); err != nil { return err } rcpath := filepath.Join(s.runRoot, driverPrefix+"containers") - if err := os.MkdirAll(rcpath, 0700); err != nil { + if err := os.MkdirAll(rcpath, 0o700); err != nil { return err } @@ -917,15 +941,28 @@ func (s *store) load() error { for _, store := range driver.AdditionalImageStores() { gipath := filepath.Join(store, driverPrefix+"images") - ris, err := newROImageStore(gipath) - if err != nil { - return err + var ris roImageStore + if s.imageStoreDir != "" && store == s.graphRoot { + // If --imagestore was set and the current store + // is `graphRoot`, then mount it as a `rw` additional + // store instead of a `readonly` additional store. + imageStore, err := newImageStore(gipath) + if err != nil { + return err + } + s.rwImageStores = append(s.rwImageStores, imageStore) + ris = imageStore + } else { + ris, err = newROImageStore(gipath) + if err != nil { + return err + } } s.roImageStores = append(s.roImageStores, ris) } s.digestLockRoot = filepath.Join(s.runRoot, driverPrefix+"locks") - if err := os.MkdirAll(s.digestLockRoot, 0700); err != nil { + if err := os.MkdirAll(s.digestLockRoot, 0o700); err != nil { return err } @@ -989,8 +1026,15 @@ func (s *store) stopUsingGraphDriver() { // Almost all users should use startUsingGraphDriver instead. // The caller must hold s.graphLock. func (s *store) createGraphDriverLocked() (drivers.Driver, error) { + driverRoot := s.imageStoreDir + imageStoreBase := s.graphRoot + if driverRoot == "" { + driverRoot = s.graphRoot + imageStoreBase = "" + } config := drivers.Options{ - Root: s.graphRoot, + Root: driverRoot, + ImageStore: imageStoreBase, RunRoot: s.runRoot, DriverPriority: s.graphDriverPriority, DriverOptions: s.graphOptions, @@ -1017,11 +1061,15 @@ func (s *store) getLayerStoreLocked() (rwLayerStore, error) { } driverPrefix := s.graphDriverName + "-" rlpath := filepath.Join(s.runRoot, driverPrefix+"layers") - if err := os.MkdirAll(rlpath, 0700); err != nil { + if err := os.MkdirAll(rlpath, 0o700); err != nil { return nil, err } - glpath := filepath.Join(s.graphRoot, driverPrefix+"layers") - if err := os.MkdirAll(glpath, 0700); err != nil { + imgStoreRoot := s.imageStoreDir + if imgStoreRoot == "" { + imgStoreRoot = s.graphRoot + } + glpath := filepath.Join(imgStoreRoot, driverPrefix+"layers") + if err := os.MkdirAll(glpath, 0o700); err != nil { return nil, err } rls, err := s.newLayerStore(rlpath, glpath, s.graphDriver, s.transientStore) @@ -1052,7 +1100,7 @@ func (s *store) getROLayerStoresLocked() ([]roLayerStore, error) { } driverPrefix := s.graphDriverName + "-" rlpath := filepath.Join(s.runRoot, driverPrefix+"layers") - if err := os.MkdirAll(rlpath, 0700); err != nil { + if err := os.MkdirAll(rlpath, 0o700); err != nil { return nil, err } for _, store := range s.graphDriver.AdditionalImageStores() { @@ -1081,7 +1129,7 @@ func (s *store) bothLayerStoreKindsLocked() (rwLayerStore, []roLayerStore, error } // bothLayerStoreKinds returns the primary, and additional read-only, layer store objects used by the store. -// It must be called with s.graphLock held. +// It must be called WITHOUT s.graphLock held. 
func (s *store) bothLayerStoreKinds() (rwLayerStore, []roLayerStore, error) { if err := s.startUsingGraphDriver(); err != nil { return nil, nil, err @@ -1115,56 +1163,107 @@ func (s *store) allLayerStores() ([]roLayerStore, error) { // readAllLayerStores processes allLayerStores() in order: // It locks the store for reading, checks for updates, and calls // -// (done, err) := fn(store) +// (data, done, err) := fn(store) // // until the callback returns done == true, and returns the data from the callback. // -// If reading any layer store fails, it immediately returns (true, err). +// If reading any layer store fails, it immediately returns ({}, true, err). // -// If all layer stores are processed without setting done == true, it returns (false, nil). +// If all layer stores are processed without setting done == true, it returns ({}, false, nil). // // Typical usage: // -// var res T = failureValue -// if done, err := s.readAllLayerStores(store, func(…) { +// if res, done, err := s.readAllLayerStores(store, func(…) { // … // }; done { // return res, err // } -func (s *store) readAllLayerStores(fn func(store roLayerStore) (bool, error)) (bool, error) { +func readAllLayerStores[T any](s *store, fn func(store roLayerStore) (T, bool, error)) (T, bool, error) { + var zeroRes T // A zero value of T + layerStores, err := s.allLayerStores() if err != nil { - return true, err + return zeroRes, true, err } for _, s := range layerStores { store := s if err := store.startReading(); err != nil { - return true, err + return zeroRes, true, err } defer store.stopReading() - if done, err := fn(store); done { - return true, err + if res, done, err := fn(store); done { + return res, true, err } } - return false, nil + return zeroRes, false, nil } // writeToLayerStore is a helper for working with store.getLayerStore(): // It locks the store for writing, checks for updates, and calls fn() // It returns the return value of fn, or its own error initializing the store. -func (s *store) writeToLayerStore(fn func(store rwLayerStore) error) error { +func writeToLayerStore[T any](s *store, fn func(store rwLayerStore) (T, error)) (T, error) { + var zeroRes T // A zero value of T + store, err := s.getLayerStore() if err != nil { - return err + return zeroRes, err } if err := store.startWriting(); err != nil { - return err + return zeroRes, err } defer store.stopWriting() return fn(store) } +// readOrWriteAllLayerStores processes allLayerStores() in order: +// It locks the writeable store for writing and all others for reading, checks +// for updates, and calls +// +// (data, done, err) := fn(store) +// +// until the callback returns done == true, and returns the data from the callback. +// +// If reading or writing any layer store fails, it immediately returns ({}, true, err). +// +// If all layer stores are processed without setting done == true, it returns ({}, false, nil). 
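The rewrites above and below share one shape, and it is the heart of this store.go refactor: the pre-generics helpers could only report (done, err) and forced each caller to smuggle its result out through a captured variable, while the generic versions thread the result through a type parameter and return a zero value of T on failure. A standalone sketch of that shape, with the storage types replaced by hypothetical stand-ins:

package main

import "fmt"

// readAll visits stores in order until fn reports done, returning fn's
// result; on a miss it returns the zero value of T, mirroring the
// readAllLayerStores/readAllImageStores helpers in this patch.
func readAll[T any](stores []string, fn func(store string) (T, bool, error)) (T, bool, error) {
	var zero T // returned when no store produces a result
	for _, s := range stores {
		if res, done, err := fn(s); done {
			return res, true, err
		}
	}
	return zero, false, nil
}

func main() {
	res, done, _ := readAll([]string{"ro-1", "ro-2", "rw"}, func(store string) (string, bool, error) {
		if store == "rw" {
			return "found in " + store, true, nil
		}
		return "", false, nil
	})
	fmt.Println(done, res) // true found in rw
}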
+// +// Typical usage: +// +// if res, done, err := s.readOrWriteAllLayerStores(store, func(…) { +// … +// }; done { +// return res, err +// } +func readOrWriteAllLayerStores[T any](s *store, fn func(store roLayerStore) (T, bool, error)) (T, bool, error) { + var zeroRes T // A zero value of T + + rwLayerStore, roLayerStores, err := s.bothLayerStoreKinds() + if err != nil { + return zeroRes, true, err + } + + if err := rwLayerStore.startWriting(); err != nil { + return zeroRes, true, err + } + defer rwLayerStore.stopWriting() + if res, done, err := fn(rwLayerStore); done { + return res, true, err + } + + for _, s := range roLayerStores { + store := s + if err := store.startReading(); err != nil { + return zeroRes, true, err + } + defer store.stopReading() + if res, done, err := fn(store); done { + return res, true, err + } + } + return zeroRes, false, nil +} + // allImageStores returns a list of all image store objects used by the Store. // This is a convenience method for read-only users of the Store. func (s *store) allImageStores() []roImageStore { @@ -1174,53 +1273,69 @@ func (s *store) allImageStores() []roImageStore { // readAllImageStores processes allImageStores() in order: // It locks the store for reading, checks for updates, and calls // -// (done, err) := fn(store) +// (data, done, err) := fn(store) // // until the callback returns done == true, and returns the data from the callback. // -// If reading any Image store fails, it immediately returns (true, err). +// If reading any Image store fails, it immediately returns ({}, true, err). // -// If all Image stores are processed without setting done == true, it returns (false, nil). +// If all Image stores are processed without setting done == true, it returns ({}, false, nil). // // Typical usage: // -// var res T = failureValue -// if done, err := s.readAllImageStores(store, func(…) { +// if res, done, err := readAllImageStores(store, func(…) { // … // }; done { // return res, err // } -func (s *store) readAllImageStores(fn func(store roImageStore) (bool, error)) (bool, error) { +func readAllImageStores[T any](s *store, fn func(store roImageStore) (T, bool, error)) (T, bool, error) { + var zeroRes T // A zero value of T + for _, s := range s.allImageStores() { store := s if err := store.startReading(); err != nil { - return true, err + return zeroRes, true, err } defer store.stopReading() - if done, err := fn(store); done { - return true, err + if res, done, err := fn(store); done { + return res, true, err } } - return false, nil + return zeroRes, false, nil } -// writeToImageStore is a convenience helper for working with store.getImageStore(): +// writeToImageStore is a convenience helper for working with store.imageStore: // It locks the store for writing, checks for updates, and calls fn(), which can then access store.imageStore. // It returns the return value of fn, or its own error initializing the store. -func (s *store) writeToImageStore(fn func() error) error { +func writeToImageStore[T any](s *store, fn func() (T, error)) (T, error) { if err := s.imageStore.startWriting(); err != nil { - return err + var zeroRes T // A zero value of T + return zeroRes, err } defer s.imageStore.stopWriting() return fn() } -// writeToContainerStore is a convenience helper for working with store.getContainerStore(): +// readContainerStore is a convenience helper for working with store.containerStore: +// It locks the store for reading, checks for updates, and calls fn(), which can then access store.containerStore. 
+// If reading the container store fails, it returns ({}, true, err). +// Returns the return value of fn on success. +func readContainerStore[T any](s *store, fn func() (T, bool, error)) (T, bool, error) { + if err := s.containerStore.startReading(); err != nil { + var zeroRes T // A zero value of T + return zeroRes, true, err + } + defer s.containerStore.stopReading() + return fn() +} + +// writeToContainerStore is a convenience helper for working with store.containerStore: // It locks the store for writing, checks for updates, and calls fn(), which can then access store.containerStore. // It returns the return value of fn, or its own error initializing the store. -func (s *store) writeToContainerStore(fn func() error) error { +func writeToContainerStore[T any](s *store, fn func() (T, error)) (T, error) { if err := s.containerStore.startWriting(); err != nil { - return err + var zeroRes T // A zero value of T + return zeroRes, err } defer s.containerStore.stopWriting() return fn() @@ -1252,10 +1367,13 @@ func (s *store) writeToAllStores(fn func(rlstore rwLayerStore) error) error { return fn(rlstore) } -// canUseShifting returns ??? -// store must be locked for writing. -func canUseShifting(store rwLayerStore, uidmap, gidmap []idtools.IDMap) bool { - if !store.supportsShifting() { +// canUseShifting returns true if we can use mount-time arguments (shifting) to +// avoid having to create a mapped top layer for a base image when we want to +// use it to create a container using ID mappings. +// On entry: +// - rlstore must be locked for writing +func (s *store) canUseShifting(uidmap, gidmap []idtools.IDMap) bool { + if !s.graphDriver.SupportsShifting() { return false } if uidmap != nil && !idtools.IsContiguous(uidmap) { @@ -1342,7 +1460,7 @@ func (s *store) PutLayer(id, parent string, names []string, mountLabel string, w OriginalDigest: options.OriginalDigest, UncompressedDigest: options.UncompressedDigest, } - if canUseShifting(rlstore, uidMap, gidMap) { + if s.canUseShifting(uidMap, gidMap) { layerOptions.IDMappingOptions = types.IDMappingOptions{HostUIDMapping: true, HostGIDMapping: true, UIDMap: nil, GIDMap: nil} } else { layerOptions.IDMappingOptions = types.IDMappingOptions{ @@ -1384,94 +1502,93 @@ func (s *store) CreateImage(id string, names []string, layer, metadata string, i layer = ilayer.ID } - var options ImageOptions - var namesToAddAfterCreating []string - - if err := s.imageStore.startWriting(); err != nil { - return nil, err - } - defer s.imageStore.stopWriting() + return writeToImageStore(s, func() (*Image, error) { + var options ImageOptions + var namesToAddAfterCreating []string - // Check if the ID refers to an image in a read-only store -- we want - // to allow images in read-only stores to have their names changed, so - // if we find one, merge the new values in with what we know about the - // image that's already there. 
- if id != "" { - for _, is := range s.roImageStores { - store := is - if err := store.startReading(); err != nil { - return nil, err - } - defer store.stopReading() - if i, err := store.Get(id); err == nil { - // set information about this image in "options" - options = ImageOptions{ - Metadata: i.Metadata, - CreationDate: i.Created, - Digest: i.Digest, - Digests: copyDigestSlice(i.Digests), - NamesHistory: copyStringSlice(i.NamesHistory), + // Check if the ID refers to an image in a read-only store -- we want + // to allow images in read-only stores to have their names changed, so + // if we find one, merge the new values in with what we know about the + // image that's already there. + if id != "" { + for _, is := range s.roImageStores { + store := is + if err := store.startReading(); err != nil { + return nil, err } - for _, key := range i.BigDataNames { - data, err := store.BigData(id, key) - if err != nil { - return nil, err + defer store.stopReading() + if i, err := store.Get(id); err == nil { + // set information about this image in "options" + options = ImageOptions{ + Metadata: i.Metadata, + CreationDate: i.Created, + Digest: i.Digest, + Digests: copyDigestSlice(i.Digests), + NamesHistory: copyStringSlice(i.NamesHistory), } - dataDigest, err := store.BigDataDigest(id, key) - if err != nil { - return nil, err + for _, key := range i.BigDataNames { + data, err := store.BigData(id, key) + if err != nil { + return nil, err + } + dataDigest, err := store.BigDataDigest(id, key) + if err != nil { + return nil, err + } + options.BigData = append(options.BigData, ImageBigDataOption{ + Key: key, + Data: data, + Digest: dataDigest, + }) } - options.BigData = append(options.BigData, ImageBigDataOption{ - Key: key, - Data: data, - Digest: dataDigest, - }) + namesToAddAfterCreating = dedupeStrings(append(append([]string{}, i.Names...), names...)) + break } - namesToAddAfterCreating = dedupeStrings(append(append([]string{}, i.Names...), names...)) - break } } - } - // merge any passed-in options into "options" as best we can - if iOptions != nil { - if !iOptions.CreationDate.IsZero() { - options.CreationDate = iOptions.CreationDate - } - if iOptions.Digest != "" { - options.Digest = iOptions.Digest - } - options.Digests = append(options.Digests, copyDigestSlice(iOptions.Digests)...) - if iOptions.Metadata != "" { - options.Metadata = iOptions.Metadata + // merge any passed-in options into "options" as best we can + if iOptions != nil { + if !iOptions.CreationDate.IsZero() { + options.CreationDate = iOptions.CreationDate + } + if iOptions.Digest != "" { + options.Digest = iOptions.Digest + } + options.Digests = append(options.Digests, copyDigestSlice(iOptions.Digests)...) + if iOptions.Metadata != "" { + options.Metadata = iOptions.Metadata + } + options.BigData = append(options.BigData, copyImageBigDataOptionSlice(iOptions.BigData)...) + options.NamesHistory = append(options.NamesHistory, copyStringSlice(iOptions.NamesHistory)...) + if options.Flags == nil { + options.Flags = make(map[string]interface{}) + } + for k, v := range iOptions.Flags { + options.Flags[k] = v + } } - options.BigData = append(options.BigData, copyImageBigDataOptionSlice(iOptions.BigData)...) - options.NamesHistory = append(options.NamesHistory, copyStringSlice(iOptions.NamesHistory)...) 
- if options.Flags == nil { - options.Flags = make(map[string]interface{}) + + if options.CreationDate.IsZero() { + options.CreationDate = time.Now().UTC() } - for k, v := range iOptions.Flags { - options.Flags[k] = v + if metadata != "" { + options.Metadata = metadata } - } - if options.CreationDate.IsZero() { - options.CreationDate = time.Now().UTC() - } - if metadata != "" { - options.Metadata = metadata - } - - res, err := s.imageStore.create(id, names, layer, options) - if err == nil && len(namesToAddAfterCreating) > 0 { - // set any names we pulled up from an additional image store, now that we won't be - // triggering a duplicate names error - err = s.imageStore.updateNames(res.ID, namesToAddAfterCreating, addNames) - } - return res, err + res, err := s.imageStore.create(id, names, layer, options) + if err == nil && len(namesToAddAfterCreating) > 0 { + // set any names we pulled up from an additional image store, now that we won't be + // triggering a duplicate names error + err = s.imageStore.updateNames(res.ID, namesToAddAfterCreating, addNames) + } + return res, err + }) } -// imageTopLayerForMapping does ??? +// imageTopLayerForMapping locates the layer that can take the place of the +// image's top layer as the shared parent layer for one or more containers +// which are using ID mappings. // On entry: // - ristore must be locked EITHER for reading or writing // - s.imageStore must be locked for writing; it might be identical to ristore. @@ -1480,7 +1597,7 @@ func (s *store) CreateImage(id string, names []string, layer, metadata string, i func (s *store) imageTopLayerForMapping(image *Image, ristore roImageStore, rlstore rwLayerStore, lstores []roLayerStore, options types.IDMappingOptions) (*Layer, error) { layerMatchesMappingOptions := func(layer *Layer, options types.IDMappingOptions) bool { // If the driver supports shifting and the layer has no mappings, we can use it. - if canUseShifting(rlstore, options.UIDMap, options.GIDMap) && len(layer.UIDMap) == 0 && len(layer.GIDMap) == 0 { + if s.canUseShifting(options.UIDMap, options.GIDMap) && len(layer.UIDMap) == 0 && len(layer.GIDMap) == 0 { return true } // If we want host mapping, and the layer uses mappings, it's not the best match. @@ -1547,7 +1664,7 @@ // that lets us edit image metadata, so create a duplicate of the layer with the desired // mappings, and register it as an alternate top layer in the image. var layerOptions LayerOptions - if canUseShifting(rlstore, options.UIDMap, options.GIDMap) { + if s.canUseShifting(options.UIDMap, options.GIDMap) { layerOptions.IDMappingOptions = types.IDMappingOptions{ HostUIDMapping: true, HostGIDMapping: true, @@ -1700,7 +1817,7 @@ func (s *store) CreateContainer(id string, names []string, image, layer, metadat // But in transient store mode, all container layers are volatile. 
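canUseShifting, reworked above, imposes two conditions: the graph driver must support shifting, and any requested ID mappings must be contiguous, because a mount-time shift can express only one unbroken range. A small sketch of the contiguity half using idtools.IsContiguous, which the new method calls; the sample mappings are invented:

package main

import (
	"fmt"

	"github.com/containers/storage/pkg/idtools"
)

func main() {
	// One unbroken run: container IDs 0..65535 map onto host IDs
	// 100000..165535, so a single mount-time shift can express it.
	contiguous := []idtools.IDMap{
		{ContainerID: 0, HostID: 100000, Size: 65536},
	}
	// A hole: the second range restarts at an unrelated host ID,
	// which no single shift can express.
	broken := []idtools.IDMap{
		{ContainerID: 0, HostID: 100000, Size: 1000},
		{ContainerID: 1000, HostID: 300000, Size: 64536},
	}
	fmt.Println(idtools.IsContiguous(contiguous)) // true
	fmt.Println(idtools.IsContiguous(broken))     // false
}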
Volatile: options.Volatile || s.transientStore, } - if canUseShifting(rlstore, uidMap, gidMap) { + if s.canUseShifting(uidMap, gidMap) { layerOptions.IDMappingOptions = types.IDMappingOptions{ HostUIDMapping: true, HostGIDMapping: true, @@ -1745,16 +1862,14 @@ func (s *store) CreateContainer(id string, names []string, image, layer, metadat options.Volatile = true } - var container *Container - err = s.writeToContainerStore(func() error { + return writeToContainerStore(s, func() (*Container, error) { options.IDMappingOptions = types.IDMappingOptions{ HostUIDMapping: len(options.UIDMap) == 0, HostGIDMapping: len(options.GIDMap) == 0, UIDMap: copyIDMap(options.UIDMap), GIDMap: copyIDMap(options.GIDMap), } - var err error - container, err = s.containerStore.create(id, names, imageID, layer, &options) + container, err := s.containerStore.create(id, names, imageID, layer, &options) if err != nil || container == nil { if err2 := rlstore.Delete(layer); err2 != nil { if err == nil { @@ -1764,9 +1879,8 @@ func (s *store) CreateContainer(id string, names []string, image, layer, metadat } } } - return err + return container, err }) - return container, err } func (s *store) SetMetadata(id, metadata string) error { @@ -1785,49 +1899,46 @@ func (s *store) SetMetadata(id, metadata string) error { } func (s *store) Metadata(id string) (string, error) { - var res string - - if done, err := s.readAllLayerStores(func(store roLayerStore) (bool, error) { + if res, done, err := readAllLayerStores(s, func(store roLayerStore) (string, bool, error) { if store.Exists(id) { - var err error - res, err = store.Metadata(id) - return true, err + res, err := store.Metadata(id) + return res, true, err } - return false, nil + return "", false, nil }); done { return res, err } - if done, err := s.readAllImageStores(func(store roImageStore) (bool, error) { + if res, done, err := readAllImageStores(s, func(store roImageStore) (string, bool, error) { if store.Exists(id) { - var err error - res, err = store.Metadata(id) - return true, err + res, err := store.Metadata(id) + return res, true, err } - return false, nil + return "", false, nil }); done { return res, err } - if err := s.containerStore.startReading(); err != nil { - return "", err - } - defer s.containerStore.stopReading() - if s.containerStore.Exists(id) { - return s.containerStore.Metadata(id) + if res, done, err := readContainerStore(s, func() (string, bool, error) { + if s.containerStore.Exists(id) { + res, err := s.containerStore.Metadata(id) + return res, true, err + } + return "", false, nil + }); done { + return res, err } + return "", ErrNotAnID } func (s *store) ListImageBigData(id string) ([]string, error) { - var res []string - if done, err := s.readAllImageStores(func(store roImageStore) (bool, error) { + if res, done, err := readAllImageStores(s, func(store roImageStore) ([]string, bool, error) { bigDataNames, err := store.BigDataNames(id) if err == nil { - res = bigDataNames - return true, nil + return bigDataNames, true, nil } - return false, nil + return nil, false, nil }); done { return res, err } @@ -1835,29 +1946,28 @@ func (s *store) ListImageBigData(id string) ([]string, error) { } func (s *store) ImageBigDataSize(id, key string) (int64, error) { - var res int64 = -1 - if done, err := s.readAllImageStores(func(store roImageStore) (bool, error) { + if res, done, err := readAllImageStores(s, func(store roImageStore) (int64, bool, error) { size, err := store.BigDataSize(id, key) if err == nil { - res = size - return true, nil + return size, true, 
nil } - return false, nil + return -1, false, nil }); done { - return res, err + if err != nil { + return -1, err + } + return res, nil } return -1, ErrSizeUnknown } func (s *store) ImageBigDataDigest(id, key string) (digest.Digest, error) { - var res digest.Digest - if done, err := s.readAllImageStores(func(ristore roImageStore) (bool, error) { + if res, done, err := readAllImageStores(s, func(ristore roImageStore) (digest.Digest, bool, error) { d, err := ristore.BigDataDigest(id, key) if err == nil && d.Validate() == nil { - res = d - return true, nil + return d, true, nil } - return false, nil + return "", false, nil }); done { return res, err } @@ -1866,17 +1976,15 @@ func (s *store) ImageBigDataDigest(id, key string) (digest.Digest, error) { func (s *store) ImageBigData(id, key string) ([]byte, error) { foundImage := false - var res []byte - if done, err := s.readAllImageStores(func(store roImageStore) (bool, error) { + if res, done, err := readAllImageStores(s, func(store roImageStore) ([]byte, bool, error) { data, err := store.BigData(id, key) if err == nil { - res = data - return true, nil + return data, true, nil } if store.Exists(id) { foundImage = true } - return false, nil + return nil, false, nil }); done { return res, err } @@ -1890,17 +1998,15 @@ func (s *store) ImageBigData(id, key string) ([]byte, error) { // named data associated with an layer. func (s *store) ListLayerBigData(id string) ([]string, error) { foundLayer := false - var res []string - if done, err := s.readAllLayerStores(func(store roLayerStore) (bool, error) { + if res, done, err := readAllLayerStores(s, func(store roLayerStore) ([]string, bool, error) { data, err := store.BigDataNames(id) if err == nil { - res = data - return true, nil + return data, true, nil } if store.Exists(id) { foundLayer = true } - return false, nil + return nil, false, nil }); done { return res, err } @@ -1914,17 +2020,15 @@ func (s *store) ListLayerBigData(id string) ([]string, error) { // associated with a layer. func (s *store) LayerBigData(id, key string) (io.ReadCloser, error) { foundLayer := false - var res io.ReadCloser - if done, err := s.readAllLayerStores(func(store roLayerStore) (bool, error) { + if res, done, err := readAllLayerStores(s, func(store roLayerStore) (io.ReadCloser, bool, error) { data, err := store.BigData(id, key) if err == nil { - res = data - return true, nil + return data, true, nil } if store.Exists(id) { foundLayer = true } - return false, nil + return nil, false, nil }); done { return res, err } @@ -1937,15 +2041,17 @@ func (s *store) LayerBigData(id, key string) (io.ReadCloser, error) { // SetLayerBigData stores a (possibly large) chunk of named data // associated with a layer. 
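Callers that have no payload to return, like SetLayerBigData just below, still flow through the generic write helpers by instantiating T with the empty struct, Go's zero-size unit type, and discarding it. A tiny standalone sketch of the idiom; withLock is a hypothetical stand-in for writeToLayerStore:

package main

import "fmt"

// withLock mimics the generic write helpers: lock, run fn, unlock,
// and hand back whatever fn returned.
func withLock[T any](fn func() (T, error)) (T, error) {
	// (locking elided in this sketch)
	return fn()
}

func main() {
	// No meaningful result: instantiate T as struct{} and keep only err.
	_, err := withLock(func() (struct{}, error) {
		fmt.Println("write happened")
		return struct{}{}, nil
	})
	fmt.Println("err:", err)
}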
func (s *store) SetLayerBigData(id, key string, data io.Reader) error { - return s.writeToLayerStore(func(store rwLayerStore) error { - return store.SetBigData(id, key, data) + _, err := writeToLayerStore(s, func(store rwLayerStore) (struct{}, error) { + return struct{}{}, store.SetBigData(id, key, data) }) + return err } func (s *store) SetImageBigData(id, key string, data []byte, digestManifest func([]byte) (digest.Digest, error)) error { - return s.writeToImageStore(func() error { - return s.imageStore.SetBigData(id, key, data, digestManifest) + _, err := writeToImageStore(s, func() (struct{}, error) { + return struct{}{}, s.imageStore.SetBigData(id, key, data, digestManifest) }) + return err } func (s *store) ImageSize(id string) (int64, error) { @@ -2066,12 +2172,11 @@ func (s *store) ContainerSize(id string) (int64, error) { return -1, err } - var res int64 = -1 - err = s.writeToContainerStore(func() error { // Yes, s.containerStore.BigDataSize requires a write lock. + return writeToContainerStore(s, func() (int64, error) { // Yes, s.containerStore.BigDataSize requires a write lock. // Read the container record. container, err := s.containerStore.Get(id) if err != nil { - return err + return -1, err } // Read the container's layer's size. @@ -2081,24 +2186,24 @@ func (s *store) ContainerSize(id string) (int64, error) { if layer, err = store.Get(container.LayerID); err == nil { size, err = store.DiffSize("", layer.ID) if err != nil { - return fmt.Errorf("determining size of layer with ID %q: %w", layer.ID, err) + return -1, fmt.Errorf("determining size of layer with ID %q: %w", layer.ID, err) } break } } if layer == nil { - return fmt.Errorf("locating layer with ID %q: %w", container.LayerID, ErrLayerUnknown) + return -1, fmt.Errorf("locating layer with ID %q: %w", container.LayerID, ErrLayerUnknown) } // Count big data items. names, err := s.containerStore.BigDataNames(id) if err != nil { - return fmt.Errorf("reading list of big data items for container %q: %w", container.ID, err) + return -1, fmt.Errorf("reading list of big data items for container %q: %w", container.ID, err) } for _, name := range names { n, err := s.containerStore.BigDataSize(id, name) if err != nil { - return fmt.Errorf("reading size of big data item %q for container %q: %w", name, id, err) + return -1, fmt.Errorf("reading size of big data item %q for container %q: %w", name, id, err) } size += n } @@ -2106,92 +2211,88 @@ func (s *store) ContainerSize(id string) (int64, error) { // Count the size of our container directory and container run directory. n, err := directory.Size(cdir) if err != nil { - return err + return -1, err } size += n n, err = directory.Size(rdir) if err != nil { - return err + return -1, err } size += n - res = size - return nil + return size, nil }) - return res, err } func (s *store) ListContainerBigData(id string) ([]string, error) { - if err := s.containerStore.startReading(); err != nil { - return nil, err - } - defer s.containerStore.stopReading() - - return s.containerStore.BigDataNames(id) + res, _, err := readContainerStore(s, func() ([]string, bool, error) { + res, err := s.containerStore.BigDataNames(id) + return res, true, err + }) + return res, err } func (s *store) ContainerBigDataSize(id, key string) (int64, error) { - var res int64 = -1 - err := s.writeToContainerStore(func() error { // Yes, BigDataSize requires a write lock. 
- var err error - res, err = s.containerStore.BigDataSize(id, key) - return err + return writeToContainerStore(s, func() (int64, error) { // Yes, BigDataSize requires a write lock. + return s.containerStore.BigDataSize(id, key) }) - return res, err } func (s *store) ContainerBigDataDigest(id, key string) (digest.Digest, error) { - var res digest.Digest - err := s.writeToContainerStore(func() error { // Yes, BigDataDigest requires a write lock. - var err error - res, err = s.containerStore.BigDataDigest(id, key) - return err + return writeToContainerStore(s, func() (digest.Digest, error) { // Yes, BigDataDigest requires a write lock. + return s.containerStore.BigDataDigest(id, key) }) - return res, err } func (s *store) ContainerBigData(id, key string) ([]byte, error) { - if err := s.containerStore.startReading(); err != nil { - return nil, err - } - defer s.containerStore.stopReading() - return s.containerStore.BigData(id, key) + res, _, err := readContainerStore(s, func() ([]byte, bool, error) { + res, err := s.containerStore.BigData(id, key) + return res, true, err + }) + return res, err } func (s *store) SetContainerBigData(id, key string, data []byte) error { - return s.writeToContainerStore(func() error { - return s.containerStore.SetBigData(id, key, data) + _, err := writeToContainerStore(s, func() (struct{}, error) { + return struct{}{}, s.containerStore.SetBigData(id, key, data) }) + return err } func (s *store) Exists(id string) bool { - var res = false - - if done, _ := s.readAllLayerStores(func(store roLayerStore) (bool, error) { + found, _, err := readAllLayerStores(s, func(store roLayerStore) (bool, bool, error) { if store.Exists(id) { - res = true - return true, nil + return true, true, nil } - return false, nil - }); done { - return res + return false, false, nil + }) + if err != nil { + return false + } + if found { + return true } - if done, _ := s.readAllImageStores(func(store roImageStore) (bool, error) { + found, _, err = readAllImageStores(s, func(store roImageStore) (bool, bool, error) { if store.Exists(id) { - res = true - return true, nil + return true, true, nil } - return false, nil - }); done { - return res + return false, false, nil + }) + if err != nil { + return false + } + if found { + return true } - if err := s.containerStore.startReading(); err != nil { + found, _, err = readContainerStore(s, func() (bool, bool, error) { + return s.containerStore.Exists(id), true, nil + }) + if err != nil { return false } - defer s.containerStore.stopReading() - return s.containerStore.Exists(id) + return found } func dedupeStrings(names []string) []string { @@ -2234,14 +2335,12 @@ func (s *store) RemoveNames(id string, names []string) error { func (s *store) updateNames(id string, names []string, op updateNameOperation) error { deduped := dedupeStrings(names) - layerFound := false - if err := s.writeToLayerStore(func(rlstore rwLayerStore) error { + if found, err := writeToLayerStore(s, func(rlstore rwLayerStore) (bool, error) { if !rlstore.Exists(id) { - return nil + return false, nil } - layerFound = true - return rlstore.updateNames(id, deduped, op) - }); err != nil || layerFound { + return true, rlstore.updateNames(id, deduped, op) + }); err != nil || found { return err } @@ -2295,14 +2394,12 @@ func (s *store) updateNames(id string, names []string, op updateNameOperation) e } } - containerFound := false - if err := s.writeToContainerStore(func() error { + if found, err := writeToContainerStore(s, func() (bool, error) { if !s.containerStore.Exists(id) { - return nil + 
return false, nil } - containerFound = true - return s.containerStore.updateNames(id, deduped, op) - }); err != nil || containerFound { + return true, s.containerStore.updateNames(id, deduped, op) + }); err != nil || found { return err } @@ -2310,67 +2407,62 @@ func (s *store) updateNames(id string, names []string, op updateNameOperation) e } func (s *store) Names(id string) ([]string, error) { - var res []string - - if done, err := s.readAllLayerStores(func(store roLayerStore) (bool, error) { + if res, done, err := readAllLayerStores(s, func(store roLayerStore) ([]string, bool, error) { if l, err := store.Get(id); l != nil && err == nil { - res = l.Names - return true, nil + return l.Names, true, nil } - return false, nil + return nil, false, nil }); done { return res, err } - if done, err := s.readAllImageStores(func(store roImageStore) (bool, error) { + if res, done, err := readAllImageStores(s, func(store roImageStore) ([]string, bool, error) { if i, err := store.Get(id); i != nil && err == nil { - res = i.Names - return true, nil + return i.Names, true, nil } - return false, nil + return nil, false, nil }); done { return res, err } - if err := s.containerStore.startReading(); err != nil { - return nil, err - } - defer s.containerStore.stopReading() - if c, err := s.containerStore.Get(id); c != nil && err == nil { - return c.Names, nil + if res, done, err := readContainerStore(s, func() ([]string, bool, error) { + if c, err := s.containerStore.Get(id); c != nil && err == nil { + return c.Names, true, nil + } + return nil, false, nil + }); done { + return res, err } + return nil, ErrLayerUnknown } func (s *store) Lookup(name string) (string, error) { - var res string - - if done, err := s.readAllLayerStores(func(store roLayerStore) (bool, error) { + if res, done, err := readAllLayerStores(s, func(store roLayerStore) (string, bool, error) { if l, err := store.Get(name); l != nil && err == nil { - res = l.ID - return true, nil + return l.ID, true, nil } - return false, nil + return "", false, nil }); done { return res, err } - if done, err := s.readAllImageStores(func(store roImageStore) (bool, error) { + if res, done, err := readAllImageStores(s, func(store roImageStore) (string, bool, error) { if i, err := store.Get(name); i != nil && err == nil { - res = i.ID - return true, nil + return i.ID, true, nil } - return false, nil + return "", false, nil }); done { return res, err } - if err := s.containerStore.startReading(); err != nil { - return "", err - } - defer s.containerStore.stopReading() - if c, err := s.containerStore.Get(name); c != nil && err == nil { - return c.ID, nil + if res, done, err := readContainerStore(s, func() (string, bool, error) { + if c, err := s.containerStore.Get(name); c != nil && err == nil { + return c.ID, true, nil + } + return "", false, nil + }); done { + return res, err } return "", ErrLayerUnknown @@ -2430,8 +2522,22 @@ func (s *store) DeleteLayer(id string) error { func (s *store) DeleteImage(id string, commit bool) (layers []string, err error) { layersToRemove := []string{} if err := s.writeToAllStores(func(rlstore rwLayerStore) error { - if s.imageStore.Exists(id) { - image, err := s.imageStore.Get(id) + // Delete image from all available imagestores configured to be used. + imageFound := false + for _, is := range append([]rwImageStore{s.imageStore}, s.rwImageStores...) 
{ + if is != s.imageStore { + // This is an additional writeable image store, + // so we must take its lock + if err := is.startWriting(); err != nil { + return err + } + defer is.stopWriting() + } + if !is.Exists(id) { + continue + } + imageFound = true + image, err := is.Get(id) if err != nil { return err } @@ -2447,7 +2553,7 @@ func (s *store) DeleteImage(id string, commit bool) (layers []string, err error) if container, ok := aContainerByImage[id]; ok { return fmt.Errorf("image used by %v: %w", container, ErrImageUsedByContainer) } - images, err := s.imageStore.Images() + images, err := is.Images() if err != nil { return err } @@ -2469,7 +2575,7 @@ func (s *store) DeleteImage(id string, commit bool) (layers []string, err error) } } if commit { - if err = s.imageStore.Delete(id); err != nil { + if err = is.Delete(id); err != nil { return err } } @@ -2514,7 +2620,8 @@ func (s *store) DeleteImage(id string, commit bool) (layers []string, err error) layersToRemoveMap[layer] = struct{}{} layer = parent } - } else { + } + if !imageFound { return ErrNotAnImage } if commit { @@ -2542,63 +2649,45 @@ func (s *store) DeleteContainer(id string) error { return ErrNotAContainer } - errChan := make(chan error) - var wg sync.WaitGroup - + // delete the layer first, separately, so that if we get an + // error while trying to do so, we don't go ahead and delete + // the container record that refers to it, effectively losing + // track of it. if rlstore.Exists(container.LayerID) { - wg.Add(1) - go func() { - errChan <- rlstore.Delete(container.LayerID) - wg.Done() - }() - } - wg.Add(1) - go func() { - errChan <- s.containerStore.Delete(id) - wg.Done() - }() + if err := rlstore.Delete(container.LayerID); err != nil { + return err + } + } + + var wg multierror.Group + wg.Go(func() error { return s.containerStore.Delete(id) }) middleDir := s.graphDriverName + "-containers" - gcpath := filepath.Join(s.GraphRoot(), middleDir, container.ID) - wg.Add(1) - go func() { - defer wg.Done() + + wg.Go(func() error { + gcpath := filepath.Join(s.GraphRoot(), middleDir, container.ID) // attempt a simple rm -rf first - err := os.RemoveAll(gcpath) - if err == nil { - errChan <- nil - return + if err := os.RemoveAll(gcpath); err == nil { + return nil } // and if it fails get to the more complicated cleanup - errChan <- system.EnsureRemoveAll(gcpath) - }() + return system.EnsureRemoveAll(gcpath) + }) - rcpath := filepath.Join(s.RunRoot(), middleDir, container.ID) - wg.Add(1) - go func() { - defer wg.Done() + wg.Go(func() error { + rcpath := filepath.Join(s.RunRoot(), middleDir, container.ID) // attempt a simple rm -rf first - err := os.RemoveAll(rcpath) - if err == nil { - errChan <- nil - return + if err := os.RemoveAll(rcpath); err == nil { + return nil } // and if it fails get to the more complicated cleanup - errChan <- system.EnsureRemoveAll(rcpath) - }() - - go func() { - wg.Wait() - close(errChan) - }() + return system.EnsureRemoveAll(rcpath) + }) - var errors []error - for err := range errChan { - if err != nil { - errors = append(errors, err) - } + if multierr := wg.Wait(); multierr != nil { + return multierr.ErrorOrNil() } - return multierror.Append(nil, errors...).ErrorOrNil() + return nil }) } @@ -2679,7 +2768,7 @@ func (s *store) mount(id string, options drivers.MountOpts) (string, error) { defer rlstore.stopWriting() if options.UidMaps != nil || options.GidMaps != nil { - options.DisableShifting = !canUseShifting(rlstore, options.UidMaps, options.GidMaps) + options.DisableShifting =
!s.canUseShifting(options.UidMaps, options.GidMaps) } if rlstore.Exists(id) { @@ -2756,27 +2845,21 @@ func (s *store) Unmount(id string, force bool) (bool, error) { if layerID, err := s.ContainerLayerID(id); err == nil { id = layerID } - var res bool - err := s.writeToLayerStore(func(rlstore rwLayerStore) error { + return writeToLayerStore(s, func(rlstore rwLayerStore) (bool, error) { if rlstore.Exists(id) { - var err error - res, err = rlstore.unmount(id, force, false) - return err + return rlstore.unmount(id, force, false) } - return ErrLayerUnknown + return false, ErrLayerUnknown }) - return res, err } func (s *store) Changes(from, to string) ([]archive.Change, error) { - var res []archive.Change - if done, err := s.readAllLayerStores(func(store roLayerStore) (bool, error) { + if res, done, err := readAllLayerStores(s, func(store roLayerStore) ([]archive.Change, bool, error) { if store.Exists(to) { - var err error - res, err = store.Changes(from, to) - return true, err + res, err := store.Changes(from, to) + return res, true, err } - return false, nil + return nil, false, nil }); done { return res, err } @@ -2784,16 +2867,17 @@ func (s *store) Changes(from, to string) ([]archive.Change, error) { } func (s *store) DiffSize(from, to string) (int64, error) { - var res int64 = -1 - if done, err := s.readAllLayerStores(func(store roLayerStore) (bool, error) { + if res, done, err := readAllLayerStores(s, func(store roLayerStore) (int64, bool, error) { if store.Exists(to) { - var err error - res, err = store.DiffSize(from, to) - return true, err + res, err := store.DiffSize(from, to) + return res, true, err } - return false, nil + return -1, false, nil }); done { - return res, err + if err != nil { + return -1, err + } + return res, nil } return -1, ErrLayerUnknown } @@ -2837,71 +2921,61 @@ func (s *store) Diff(from, to string, options *DiffOptions) (io.ReadCloser, erro } func (s *store) ApplyDiffFromStagingDirectory(to, stagingDirectory string, diffOutput *drivers.DriverWithDifferOutput, options *drivers.ApplyDiffOpts) error { - return s.writeToLayerStore(func(rlstore rwLayerStore) error { + _, err := writeToLayerStore(s, func(rlstore rwLayerStore) (struct{}, error) { if !rlstore.Exists(to) { - return ErrLayerUnknown + return struct{}{}, ErrLayerUnknown } - return rlstore.ApplyDiffFromStagingDirectory(to, stagingDirectory, diffOutput, options) + return struct{}{}, rlstore.ApplyDiffFromStagingDirectory(to, stagingDirectory, diffOutput, options) }) + return err } func (s *store) CleanupStagingDirectory(stagingDirectory string) error { - return s.writeToLayerStore(func(rlstore rwLayerStore) error { - return rlstore.CleanupStagingDirectory(stagingDirectory) + _, err := writeToLayerStore(s, func(rlstore rwLayerStore) (struct{}, error) { + return struct{}{}, rlstore.CleanupStagingDirectory(stagingDirectory) }) + return err } func (s *store) ApplyDiffWithDiffer(to string, options *drivers.ApplyDiffOpts, differ drivers.Differ) (*drivers.DriverWithDifferOutput, error) { - var res *drivers.DriverWithDifferOutput - err := s.writeToLayerStore(func(rlstore rwLayerStore) error { + return writeToLayerStore(s, func(rlstore rwLayerStore) (*drivers.DriverWithDifferOutput, error) { if to != "" && !rlstore.Exists(to) { - return ErrLayerUnknown + return nil, ErrLayerUnknown } - var err error - res, err = rlstore.ApplyDiffWithDiffer(to, options, differ) - return err + return rlstore.ApplyDiffWithDiffer(to, options, differ) }) - return res, err } func (s *store) DifferTarget(id string) (string, error) { - var res 
string - err := s.writeToLayerStore(func(rlstore rwLayerStore) error { + return writeToLayerStore(s, func(rlstore rwLayerStore) (string, error) { if rlstore.Exists(id) { - var err error - res, err = rlstore.DifferTarget(id) - return err + return rlstore.DifferTarget(id) } - return ErrLayerUnknown + return "", ErrLayerUnknown }) - return res, err } func (s *store) ApplyDiff(to string, diff io.Reader) (int64, error) { - var res int64 = -1 - err := s.writeToLayerStore(func(rlstore rwLayerStore) error { + return writeToLayerStore(s, func(rlstore rwLayerStore) (int64, error) { if rlstore.Exists(to) { - var err error - res, err = rlstore.ApplyDiff(to, diff) - return err + return rlstore.ApplyDiff(to, diff) } - return ErrLayerUnknown + return -1, ErrLayerUnknown }) - return res, err } func (s *store) layersByMappedDigest(m func(roLayerStore, digest.Digest) ([]Layer, error), d digest.Digest) ([]Layer, error) { var layers []Layer - if _, err := s.readAllLayerStores(func(store roLayerStore) (bool, error) { + if _, _, err := readAllLayerStores(s, func(store roLayerStore) (struct{}, bool, error) { storeLayers, err := m(store, d) if err != nil { if !errors.Is(err, ErrLayerUnknown) { - return true, err + return struct{}{}, true, err } - return false, nil + return struct{}{}, false, nil } layers = append(layers, storeLayers...) - return false, nil + return struct{}{}, false, nil }); err != nil { return nil, err } @@ -2926,16 +3000,17 @@ func (s *store) LayersByUncompressedDigest(d digest.Digest) ([]Layer, error) { } func (s *store) LayerSize(id string) (int64, error) { - var res int64 = -1 - if done, err := s.readAllLayerStores(func(store roLayerStore) (bool, error) { + if res, done, err := readAllLayerStores(s, func(store roLayerStore) (int64, bool, error) { if store.Exists(id) { - var err error - res, err = store.Size(id) - return true, err + res, err := store.Size(id) + return res, true, err } - return false, nil + return -1, false, nil }); done { - return res, err + if err != nil { + return -1, err + } + return res, nil } return -1, ErrLayerUnknown } @@ -2980,13 +3055,13 @@ func (s *store) ContainerParentOwners(id string) ([]int, []int, error) { func (s *store) Layers() ([]Layer, error) { var layers []Layer - if done, err := s.readAllLayerStores(func(store roLayerStore) (bool, error) { + if _, done, err := readAllLayerStores(s, func(store roLayerStore) (struct{}, bool, error) { storeLayers, err := store.Layers() if err != nil { - return true, err + return struct{}{}, true, err } layers = append(layers, storeLayers...) - return false, nil + return struct{}{}, false, nil }); done { return nil, err } @@ -2995,13 +3070,13 @@ func (s *store) Layers() ([]Layer, error) { func (s *store) Images() ([]Image, error) { var images []Image - if _, err := s.readAllImageStores(func(store roImageStore) (bool, error) { + if _, _, err := readAllImageStores(s, func(store roImageStore) (struct{}, bool, error) { storeImages, err := store.Images() if err != nil { - return true, err + return struct{}{}, true, err } images = append(images, storeImages...) 
- return false, nil + return struct{}{}, false, nil }); err != nil { return nil, err } @@ -3009,23 +3084,20 @@ func (s *store) Images() ([]Image, error) { } func (s *store) Containers() ([]Container, error) { - if err := s.containerStore.startReading(); err != nil { - return nil, err - } - defer s.containerStore.stopReading() - - return s.containerStore.Containers() + res, _, err := readContainerStore(s, func() ([]Container, bool, error) { + res, err := s.containerStore.Containers() + return res, true, err + }) + return res, err } func (s *store) Layer(id string) (*Layer, error) { - var res *Layer - if done, err := s.readAllLayerStores(func(store roLayerStore) (bool, error) { + if res, done, err := readAllLayerStores(s, func(store roLayerStore) (*Layer, bool, error) { layer, err := store.Get(id) if err == nil { - res = layer - return true, nil + return layer, true, nil } - return false, nil + return nil, false, nil }); done { return res, err } @@ -3119,8 +3191,7 @@ func (al *additionalLayer) Release() { } func (s *store) Image(id string) (*Image, error) { - var res *Image - if done, err := s.readAllImageStores(func(store roImageStore) (bool, error) { + if res, done, err := readAllImageStores(s, func(store roImageStore) (*Image, bool, error) { image, err := store.Get(id) if err == nil { if store != s.imageStore { @@ -3130,13 +3201,12 @@ func (s *store) Image(id string) (*Image, error) { // store, but we have an entry with the same ID in the read-write store, // then the name was removed when we duplicated the image's // record into writable storage, so we should ignore this entry - return false, nil + return nil, false, nil } } - res = image - return true, nil + return image, true, nil } - return false, nil + return nil, false, nil }); done { return res, err } @@ -3150,10 +3220,10 @@ func (s *store) ImagesByTopLayer(id string) ([]*Image, error) { } images := []*Image{} - if _, err := s.readAllImageStores(func(store roImageStore) (bool, error) { + if _, _, err := readAllImageStores(s, func(store roImageStore) (struct{}, bool, error) { imageList, err := store.Images() if err != nil { - return true, err + return struct{}{}, true, err } for _, image := range imageList { image := image @@ -3161,7 +3231,7 @@ func (s *store) ImagesByTopLayer(id string) ([]*Image, error) { images = append(images, &image) } } - return false, nil + return struct{}{}, false, nil }); err != nil { return nil, err } @@ -3170,13 +3240,13 @@ func (s *store) ImagesByTopLayer(id string) ([]*Image, error) { func (s *store) ImagesByDigest(d digest.Digest) ([]*Image, error) { images := []*Image{} - if _, err := s.readAllImageStores(func(store roImageStore) (bool, error) { + if _, _, err := readAllImageStores(s, func(store roImageStore) (struct{}, bool, error) { imageList, err := store.ByDigest(d) if err != nil && !errors.Is(err, ErrImageUnknown) { - return true, err + return struct{}{}, true, err } images = append(images, imageList...) 
- return false, nil + return struct{}{}, false, nil }); err != nil { return nil, err } @@ -3184,20 +3254,18 @@ func (s *store) ImagesByDigest(d digest.Digest) ([]*Image, error) { } func (s *store) Container(id string) (*Container, error) { - if err := s.containerStore.startReading(); err != nil { - return nil, err - } - defer s.containerStore.stopReading() - - return s.containerStore.Get(id) + res, _, err := readContainerStore(s, func() (*Container, bool, error) { + res, err := s.containerStore.Get(id) + return res, true, err + }) + return res, err } func (s *store) ContainerLayerID(id string) (string, error) { - if err := s.containerStore.startReading(); err != nil { - return "", err - } - defer s.containerStore.stopReading() - container, err := s.containerStore.Get(id) + container, _, err := readContainerStore(s, func() (*Container, bool, error) { + res, err := s.containerStore.Get(id) + return res, true, err + }) if err != nil { return "", err } @@ -3209,11 +3277,10 @@ func (s *store) ContainerByLayer(id string) (*Container, error) { if err != nil { return nil, err } - if err := s.containerStore.startReading(); err != nil { - return nil, err - } - defer s.containerStore.stopReading() - containerList, err := s.containerStore.Containers() + containerList, _, err := readContainerStore(s, func() ([]Container, bool, error) { + res, err := s.containerStore.Containers() + return res, true, err + }) if err != nil { return nil, err } @@ -3227,41 +3294,37 @@ func (s *store) ContainerByLayer(id string) (*Container, error) { } func (s *store) ContainerDirectory(id string) (string, error) { - if err := s.containerStore.startReading(); err != nil { - return "", err - } - defer s.containerStore.stopReading() - - id, err := s.containerStore.Lookup(id) - if err != nil { - return "", err - } + res, _, err := readContainerStore(s, func() (string, bool, error) { + id, err := s.containerStore.Lookup(id) + if err != nil { + return "", true, err + } - middleDir := s.graphDriverName + "-containers" - gcpath := filepath.Join(s.GraphRoot(), middleDir, id, "userdata") - if err := os.MkdirAll(gcpath, 0700); err != nil { - return "", err - } - return gcpath, nil + middleDir := s.graphDriverName + "-containers" + gcpath := filepath.Join(s.GraphRoot(), middleDir, id, "userdata") + if err := os.MkdirAll(gcpath, 0o700); err != nil { + return "", true, err + } + return gcpath, true, nil + }) + return res, err } func (s *store) ContainerRunDirectory(id string) (string, error) { - if err := s.containerStore.startReading(); err != nil { - return "", err - } - defer s.containerStore.stopReading() - - id, err := s.containerStore.Lookup(id) - if err != nil { - return "", err - } + res, _, err := readContainerStore(s, func() (string, bool, error) { + id, err := s.containerStore.Lookup(id) + if err != nil { + return "", true, err + } - middleDir := s.graphDriverName + "-containers" - rcpath := filepath.Join(s.RunRoot(), middleDir, id, "userdata") - if err := os.MkdirAll(rcpath, 0700); err != nil { - return "", err - } - return rcpath, nil + middleDir := s.graphDriverName + "-containers" + rcpath := filepath.Join(s.RunRoot(), middleDir, id, "userdata") + if err := os.MkdirAll(rcpath, 0o700); err != nil { + return "", true, err + } + return rcpath, true, nil + }) + return res, err } func (s *store) SetContainerDirectoryFile(id, file string, data []byte) error { @@ -3269,11 +3332,11 @@ func (s *store) SetContainerDirectoryFile(id, file string, data []byte) error { if err != nil { return err } - err = 
os.MkdirAll(filepath.Dir(filepath.Join(dir, file)), 0700) + err = os.MkdirAll(filepath.Dir(filepath.Join(dir, file)), 0o700) if err != nil { return err } - return ioutils.AtomicWriteFile(filepath.Join(dir, file), data, 0600) + return ioutils.AtomicWriteFile(filepath.Join(dir, file), data, 0o600) } func (s *store) FromContainerDirectory(id, file string) ([]byte, error) { @@ -3289,11 +3352,11 @@ func (s *store) SetContainerRunDirectoryFile(id, file string, data []byte) error if err != nil { return err } - err = os.MkdirAll(filepath.Dir(filepath.Join(dir, file)), 0700) + err = os.MkdirAll(filepath.Dir(filepath.Join(dir, file)), 0o700) if err != nil { return err } - return ioutils.AtomicWriteFile(filepath.Join(dir, file), data, 0600) + return ioutils.AtomicWriteFile(filepath.Join(dir, file), data, 0o600) } func (s *store) FromContainerRunDirectory(id, file string) ([]byte, error) { @@ -3541,19 +3604,19 @@ func (s *store) Free() { // Tries to clean up old unreferenced container leftovers. returns the first error // but continues as far as it can func (s *store) GarbageCollect() error { - firstErr := s.writeToContainerStore(func() error { - return s.containerStore.GarbageCollect() + _, firstErr := writeToContainerStore(s, func() (struct{}, error) { + return struct{}{}, s.containerStore.GarbageCollect() }) - moreErr := s.writeToImageStore(func() error { - return s.imageStore.GarbageCollect() + _, moreErr := writeToImageStore(s, func() (struct{}, error) { + return struct{}{}, s.imageStore.GarbageCollect() }) if firstErr == nil { firstErr = moreErr } - moreErr = s.writeToLayerStore(func(rlstore rwLayerStore) error { - return rlstore.GarbageCollect() + _, moreErr = writeToLayerStore(s, func(rlstore rwLayerStore) (struct{}, error) { + return struct{}{}, rlstore.GarbageCollect() }) if firstErr == nil { firstErr = moreErr diff --git a/vendor/github.com/containers/storage/types/errors.go b/vendor/github.com/containers/storage/types/errors.go index dc6ee3e0c..845b14eed 100644 --- a/vendor/github.com/containers/storage/types/errors.go +++ b/vendor/github.com/containers/storage/types/errors.go @@ -59,4 +59,41 @@ var ( ErrInvalidMappings = errors.New("invalid mappings specified") // ErrNoAvailableIDs is returned when there are not enough unused IDS within the user namespace. ErrNoAvailableIDs = errors.New("not enough unused IDs in user namespace") + + // ErrLayerUnaccounted describes a layer that is present in the lower-level storage driver, + // but which is not known to or managed by the higher-level driver-agnostic logic. + ErrLayerUnaccounted = errors.New("layer in lower level storage driver not accounted for") + // ErrLayerUnreferenced describes a layer which is not used by any image or container. + ErrLayerUnreferenced = errors.New("layer not referenced by any images or containers") + // ErrLayerIncorrectContentDigest describes a layer for which the contents of one or more + // files which were added in the layer appear to have changed. It may instead look like an + // unnamed "file integrity checksum failed" error. + ErrLayerIncorrectContentDigest = errors.New("layer content incorrect digest") + // ErrLayerIncorrectContentSize describes a layer for which regenerating the diff that was + // used to populate the layer produced a diff of a different size. We check the digest + // first, so it's highly unlikely you'll ever see this error. 
+ ErrLayerIncorrectContentSize = errors.New("layer content incorrect size") + // ErrLayerContentModified describes a layer which contains contents which should not be + // there, or for which ownership/permissions/dates have been changed. + ErrLayerContentModified = errors.New("layer content modified") + // ErrLayerDataMissing describes a layer which is missing a big data item. + ErrLayerDataMissing = errors.New("layer data item is missing") + // ErrLayerMissing describes a layer which is the missing parent of a layer. + ErrLayerMissing = errors.New("layer is missing") + // ErrImageLayerMissing describes an image which claims to have a layer that we don't know + // about. + ErrImageLayerMissing = errors.New("image layer is missing") + // ErrImageDataMissing describes an image which is missing a big data item. + ErrImageDataMissing = errors.New("image data item is missing") + // ErrImageDataIncorrectSize describes an image which has a big data item which looks like + // its size has changed, likely because it's been modified somehow. + ErrImageDataIncorrectSize = errors.New("image data item has incorrect size") + // ErrContainerImageMissing describes a container which claims to be based on an image that + // we don't know about. + ErrContainerImageMissing = errors.New("image missing") + // ErrContainerDataMissing describes a container which is missing a big data item. + ErrContainerDataMissing = errors.New("container data item is missing") + // ErrContainerDataIncorrectSize describes a container which has a big data item which looks + // like its size has changed, likely because it's been modified somehow. + ErrContainerDataIncorrectSize = errors.New("container data item has incorrect size") ) diff --git a/vendor/github.com/containers/storage/types/options.go b/vendor/github.com/containers/storage/types/options.go index 7189a8e6a..3ff00ac64 100644 --- a/vendor/github.com/containers/storage/types/options.go +++ b/vendor/github.com/containers/storage/types/options.go @@ -21,6 +21,7 @@ type TomlConfig struct { Driver string `toml:"driver,omitempty"` DriverPriority []string `toml:"driver_priority,omitempty"` RunRoot string `toml:"runroot,omitempty"` + ImageStore string `toml:"imagestore,omitempty"` GraphRoot string `toml:"graphroot,omitempty"` RootlessStoragePath string `toml:"rootless_storage_path,omitempty"` TransientStore bool `toml:"transient_store,omitempty"` @@ -215,6 +216,10 @@ type StoreOptions struct { // GraphRoot is the filesystem path under which we will store the // contents of layers, images, and containers. GraphRoot string `json:"root,omitempty"` + // ImageStore is the location of the image store, which is separated from the + // container store. Usually this is not recommended unless users want a + // separate store for images and containers. + ImageStore string `json:"imagestore,omitempty"` // RootlessStoragePath is the storage path for rootless users // default $HOME/.local/share/containers/storage RootlessStoragePath string `toml:"rootless_storage_path"` @@ -295,6 +300,7 @@ func getRootlessStorageOpts(rootlessUID int, systemOpts StoreOptions) (StoreOpti // present.
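The new `imagestore` key added to `TomlConfig` and `StoreOptions` above lets images live on a separate path from the graph root. A hedged sketch of how a caller might use it, assuming `storage.GetStore` accepts the `types.StoreOptions` shown in this hunk (paths and driver choice are illustrative):

import (
	"github.com/containers/storage"
	"github.com/containers/storage/types"
)

func openSplitStore() (storage.Store, error) {
	opts := types.StoreOptions{
		RunRoot:         "/run/containers/storage",
		GraphRoot:       "/var/lib/containers/storage",    // layers and containers
		ImageStore:      "/var/lib/containers/imagestore", // images kept separately (new in this bump)
		GraphDriverName: "overlay",
	}
	return storage.GetStore(opts)
}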
if defaultConfigFileSet { opts.GraphDriverOptions = systemOpts.GraphDriverOptions + opts.ImageStore = systemOpts.ImageStore } else if opts.GraphDriverName == overlayDriver { for _, o := range systemOpts.GraphDriverOptions { if strings.Contains(o, "ignore_chown_errors") { @@ -305,7 +311,23 @@ func getRootlessStorageOpts(rootlessUID int, systemOpts StoreOptions) (StoreOpti } if opts.GraphDriverName == "" { if len(systemOpts.GraphDriverPriority) == 0 { - opts.GraphDriverName = "vfs" + dirEntries, err := os.ReadDir(opts.GraphRoot) + if err == nil { + for _, entry := range dirEntries { + if strings.HasSuffix(entry.Name(), "-images") { + opts.GraphDriverName = strings.TrimSuffix(entry.Name(), "-images") + break + } + } + } + + if opts.GraphDriverName == "" { + if canUseRootlessOverlay(opts.GraphRoot, opts.RunRoot) { + opts.GraphDriverName = overlayDriver + } else { + opts.GraphDriverName = "vfs" + } + } } else { opts.GraphDriverPriority = systemOpts.GraphDriverPriority } @@ -405,6 +427,9 @@ func ReloadConfigurationFile(configFile string, storeOptions *StoreOptions) erro if config.Storage.GraphRoot != "" { storeOptions.GraphRoot = config.Storage.GraphRoot } + if config.Storage.ImageStore != "" { + storeOptions.ImageStore = config.Storage.ImageStore + } if config.Storage.RootlessStoragePath != "" { storeOptions.RootlessStoragePath = config.Storage.RootlessStoragePath } @@ -432,6 +457,16 @@ func ReloadConfigurationFile(configFile string, storeOptions *StoreOptions) erro if config.Storage.Options.MountOpt != "" { storeOptions.GraphDriverOptions = append(storeOptions.GraphDriverOptions, fmt.Sprintf("%s.mountopt=%s", config.Storage.Driver, config.Storage.Options.MountOpt)) } + + uidmap, err := idtools.ParseIDMap([]string{config.Storage.Options.RemapUIDs}, "remap-uids") + if err != nil { + return err + } + gidmap, err := idtools.ParseIDMap([]string{config.Storage.Options.RemapGIDs}, "remap-gids") + if err != nil { + return err + } + if config.Storage.Options.RemapUser != "" && config.Storage.Options.RemapGroup == "" { config.Storage.Options.RemapGroup = config.Storage.Options.RemapUser } @@ -444,19 +479,9 @@ func ReloadConfigurationFile(configFile string, storeOptions *StoreOptions) erro logrus.Warningf("Error initializing ID mappings for %s:%s %v\n", config.Storage.Options.RemapUser, config.Storage.Options.RemapGroup, err) return err } - storeOptions.UIDMap = mappings.UIDs() - storeOptions.GIDMap = mappings.GIDs() + uidmap = mappings.UIDs() + gidmap = mappings.GIDs() } - - uidmap, err := idtools.ParseIDMap([]string{config.Storage.Options.RemapUIDs}, "remap-uids") - if err != nil { - return err - } - gidmap, err := idtools.ParseIDMap([]string{config.Storage.Options.RemapGIDs}, "remap-gids") - if err != nil { - return err - } - storeOptions.UIDMap = uidmap storeOptions.GIDMap = gidmap storeOptions.RootAutoNsUser = config.Storage.Options.RootAutoUsernsUser diff --git a/vendor/github.com/containers/storage/types/options_darwin.go b/vendor/github.com/containers/storage/types/options_darwin.go index eed1a3d94..3eecc2b82 100644 --- a/vendor/github.com/containers/storage/types/options_darwin.go +++ b/vendor/github.com/containers/storage/types/options_darwin.go @@ -8,6 +8,9 @@ const ( SystemConfigFile = "/usr/share/containers/storage.conf" ) -var ( - defaultOverrideConfigFile = "/etc/containers/storage.conf" -) +var defaultOverrideConfigFile = "/etc/containers/storage.conf" + +// canUseRootlessOverlay returns true if the overlay driver can be used for rootless containers +func canUseRootlessOverlay(home, 
runhome string) bool { + return false +} diff --git a/vendor/github.com/containers/storage/types/options_freebsd.go b/vendor/github.com/containers/storage/types/options_freebsd.go index afb7ec6b4..be2bc2f27 100644 --- a/vendor/github.com/containers/storage/types/options_freebsd.go +++ b/vendor/github.com/containers/storage/types/options_freebsd.go @@ -12,3 +12,8 @@ const ( var ( defaultOverrideConfigFile = "/usr/local/etc/containers/storage.conf" ) + +// canUseRootlessOverlay returns true if the overlay driver can be used for rootless containers +func canUseRootlessOverlay(home, runhome string) bool { + return false +} diff --git a/vendor/github.com/containers/storage/types/options_linux.go b/vendor/github.com/containers/storage/types/options_linux.go index d44aaf76a..a28e82883 100644 --- a/vendor/github.com/containers/storage/types/options_linux.go +++ b/vendor/github.com/containers/storage/types/options_linux.go @@ -1,5 +1,13 @@ package types +import ( + "os/exec" + "strconv" + "strings" + + "golang.org/x/sys/unix" +) + const ( // these are default path for run and graph root for rootful users // for rootless path is constructed via getRootlessStorageOpts @@ -12,3 +20,33 @@ const ( var ( defaultOverrideConfigFile = "/etc/containers/storage.conf" ) + +// canUseRootlessOverlay returns true if the overlay driver can be used for rootless containers +func canUseRootlessOverlay(home, runhome string) bool { + // we check first for fuse-overlayfs since it is cheaper. + if path, _ := exec.LookPath("fuse-overlayfs"); path != "" { + return true + } + + // We cannot use overlay.SupportsNativeOverlay since canUseRootlessOverlay is called by Podman + // before we enter the user namespace and the driver we pick here is written to the podman database. + // Checking the kernel version is usually not a good idea since the feature could be back-ported, e.g. on RHEL, + // but this is just a heuristic and on RHEL we always install the storage.conf file. + // native overlay for rootless was added upstream in 5.13 (at least the first version that we support), so check + // that the kernel is >= 5.13.
+ var uts unix.Utsname + if err := unix.Uname(&uts); err == nil { + parts := strings.Split(string(uts.Release[:]), ".") + major, _ := strconv.Atoi(parts[0]) + if major >= 6 { + return true + } + if major == 5 && len(parts) > 1 { + minor, _ := strconv.Atoi(parts[1]) + if minor >= 13 { + return true + } + } + } + return false +} diff --git a/vendor/github.com/containers/storage/types/options_windows.go b/vendor/github.com/containers/storage/types/options_windows.go index d44aaf76a..c1bea9fac 100644 --- a/vendor/github.com/containers/storage/types/options_windows.go +++ b/vendor/github.com/containers/storage/types/options_windows.go @@ -12,3 +12,8 @@ const ( var ( defaultOverrideConfigFile = "/etc/containers/storage.conf" ) + +// canUseRootlessOverlay returns true if the overlay driver can be used for rootless containers +func canUseRootlessOverlay(home, runhome string) bool { + return false +} diff --git a/vendor/github.com/containers/storage/types/storage_test.conf b/vendor/github.com/containers/storage/types/storage_test.conf index 9b682fe15..87b0c9bb1 100644 --- a/vendor/github.com/containers/storage/types/storage_test.conf +++ b/vendor/github.com/containers/storage/types/storage_test.conf @@ -25,6 +25,16 @@ rootless_storage_path = "$HOME/$UID/containers/storage" additionalimagestores = [ ] +# Remap-UIDs/GIDs is the mapping from UIDs/GIDs as they should appear inside of +# a container, to the UIDs/GIDs as they should appear outside of the container, +# and the length of the range of UIDs/GIDs. Additional mapped sets can be +# listed and will be heeded by libraries, but there are limits to the number of +# mappings which the kernel will allow when you later attempt to run a +# container. +# +remap-uids = "0:1000000000:30000" +remap-gids = "0:1500000000:60000" + [storage.options.overlay] # mountopt specifies comma separated list of extra mount options diff --git a/vendor/github.com/containers/storage/types/utils.go b/vendor/github.com/containers/storage/types/utils.go index 72c38f861..73134f82d 100644 --- a/vendor/github.com/containers/storage/types/utils.go +++ b/vendor/github.com/containers/storage/types/utils.go @@ -20,7 +20,7 @@ func GetRootlessRuntimeDir(rootlessUID int) (string, error) { return "", err } path = filepath.Join(path, "containers") - if err := os.MkdirAll(path, 0700); err != nil { + if err := os.MkdirAll(path, 0o700); err != nil { return "", fmt.Errorf("unable to make rootless runtime: %w", err) } return path, nil @@ -45,25 +45,30 @@ type rootlessRuntimeDirEnvironmentImplementation struct { func (env rootlessRuntimeDirEnvironmentImplementation) getProcCommandFile() string { return env.procCommandFile } + func (env rootlessRuntimeDirEnvironmentImplementation) getRunUserDir() string { return env.runUserDir } + func (env rootlessRuntimeDirEnvironmentImplementation) getTmpPerUserDir() string { return env.tmpPerUserDir } + func (rootlessRuntimeDirEnvironmentImplementation) homeDirGetRuntimeDir() (string, error) { return homedir.GetRuntimeDir() } + func (rootlessRuntimeDirEnvironmentImplementation) systemLstat(path string) (*system.StatT, error) { return system.Lstat(path) } + func (rootlessRuntimeDirEnvironmentImplementation) homedirGet() string { return homedir.Get() } func isRootlessRuntimeDirOwner(dir string, env rootlessRuntimeDirEnvironment) bool { st, err := env.systemLstat(dir) - return err == nil && int(st.UID()) == os.Getuid() && st.Mode()&0700 == 0700 && st.Mode()&0066 == 0000 + return err == nil && int(st.UID()) == os.Getuid() && st.Mode()&0o700 == 0o700 && 
st.Mode()&0o066 == 0o000 } // getRootlessRuntimeDirIsolated is an internal implementation detail of getRootlessRuntimeDir to allow testing. @@ -85,7 +90,7 @@ func getRootlessRuntimeDirIsolated(env rootlessRuntimeDirEnvironment) (string, e tmpPerUserDir := env.getTmpPerUserDir() if tmpPerUserDir != "" { if _, err := env.systemLstat(tmpPerUserDir); os.IsNotExist(err) { - if err := os.Mkdir(tmpPerUserDir, 0700); err != nil { + if err := os.Mkdir(tmpPerUserDir, 0o700); err != nil { logrus.Errorf("Failed to create temp directory for user: %v", err) } else { return tmpPerUserDir, nil diff --git a/vendor/github.com/go-logr/logr/.golangci.yaml b/vendor/github.com/go-logr/logr/.golangci.yaml index 94ff801df..0cffafa7b 100644 --- a/vendor/github.com/go-logr/logr/.golangci.yaml +++ b/vendor/github.com/go-logr/logr/.golangci.yaml @@ -6,7 +6,6 @@ linters: disable-all: true enable: - asciicheck - - deadcode - errcheck - forcetypeassert - gocritic @@ -18,10 +17,8 @@ linters: - misspell - revive - staticcheck - - structcheck - typecheck - unused - - varcheck issues: exclude-use-default: false diff --git a/vendor/github.com/go-logr/logr/discard.go b/vendor/github.com/go-logr/logr/discard.go index 9d92a38f1..99fe8be93 100644 --- a/vendor/github.com/go-logr/logr/discard.go +++ b/vendor/github.com/go-logr/logr/discard.go @@ -20,35 +20,5 @@ package logr // used whenever the caller is not interested in the logs. Logger instances // produced by this function always compare as equal. func Discard() Logger { - return Logger{ - level: 0, - sink: discardLogSink{}, - } -} - -// discardLogSink is a LogSink that discards all messages. -type discardLogSink struct{} - -// Verify that it actually implements the interface -var _ LogSink = discardLogSink{} - -func (l discardLogSink) Init(RuntimeInfo) { -} - -func (l discardLogSink) Enabled(int) bool { - return false -} - -func (l discardLogSink) Info(int, string, ...interface{}) { -} - -func (l discardLogSink) Error(error, string, ...interface{}) { -} - -func (l discardLogSink) WithValues(...interface{}) LogSink { - return l -} - -func (l discardLogSink) WithName(string) LogSink { - return l + return New(nil) } diff --git a/vendor/github.com/go-logr/logr/funcr/funcr.go b/vendor/github.com/go-logr/logr/funcr/funcr.go index 7accdb0c4..e52f0cd01 100644 --- a/vendor/github.com/go-logr/logr/funcr/funcr.go +++ b/vendor/github.com/go-logr/logr/funcr/funcr.go @@ -21,13 +21,13 @@ limitations under the License. // github.com/go-logr/logr.LogSink with output through an arbitrary // "write" function. See New and NewJSON for details. // -// Custom LogSinks +// # Custom LogSinks // // For users who need more control, a funcr.Formatter can be embedded inside // your own custom LogSink implementation. This is useful when the LogSink // needs to implement additional methods, for example. // -// Formatting +// # Formatting // // This will respect logr.Marshaler, fmt.Stringer, and error interfaces for // values which are being logged. When rendering a struct, funcr will use Go's @@ -37,6 +37,7 @@ package funcr import ( "bytes" "encoding" + "encoding/json" "fmt" "path/filepath" "reflect" @@ -217,7 +218,7 @@ func newFormatter(opts Options, outfmt outputFormat) Formatter { prefix: "", values: nil, depth: 0, - opts: opts, + opts: &opts, } return f } @@ -231,7 +232,7 @@ type Formatter struct { values []interface{} valuesStr string depth int - opts Options + opts *Options } // outputFormat indicates which outputFormat to use. 
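The funcr hunks above touch the adapter that builds a logr.Logger from a bare write function (the `opts` field now being held by pointer is an internal detail). For orientation, minimal usage looks roughly like the following; this is a sketch against funcr's documented `New(func(prefix, args string), Options)` entry point, not code from this patch:

import (
	"fmt"

	"github.com/go-logr/logr"
	"github.com/go-logr/logr/funcr"
)

func newStdoutLogger() logr.Logger {
	return funcr.New(func(prefix, args string) {
		fmt.Println(prefix, args) // the arbitrary "write" function
	}, funcr.Options{Verbosity: 1}) // produce V(0) and V(1) lines
}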
@@ -447,6 +448,7 @@ func (f Formatter) prettyWithFlags(value interface{}, flags uint32, depth int) s if flags&flagRawStruct == 0 { buf.WriteByte('{') } + printComma := false // testing i>0 is not enough because of JSON omitted fields for i := 0; i < t.NumField(); i++ { fld := t.Field(i) if fld.PkgPath != "" { @@ -478,9 +480,10 @@ func (f Formatter) prettyWithFlags(value interface{}, flags uint32, depth int) s if omitempty && isEmpty(v.Field(i)) { continue } - if i > 0 { + if printComma { buf.WriteByte(',') } + printComma = true // if we got here, we are rendering a field if fld.Anonymous && fld.Type.Kind() == reflect.Struct && name == "" { buf.WriteString(f.prettyWithFlags(v.Field(i).Interface(), flags|flagRawStruct, depth+1)) continue @@ -500,6 +503,20 @@ func (f Formatter) prettyWithFlags(value interface{}, flags uint32, depth int) s } return buf.String() case reflect.Slice, reflect.Array: + // If this is outputting as JSON make sure this isn't really a json.RawMessage. + // If so just emit it "as-is" and don't pretty-print it, as that will just print + // it as [X,Y,Z,...] which isn't terribly useful vs the string form you really want. + if f.outputFormat == outputJSON { + if rm, ok := value.(json.RawMessage); ok { + // If it's empty make sure we emit an empty value as the array style would below. + if len(rm) > 0 { + buf.Write(rm) + } else { + buf.WriteString("null") + } + return buf.String() + } + } buf.WriteByte('[') for i := 0; i < v.Len(); i++ { if i > 0 { diff --git a/vendor/github.com/go-logr/logr/logr.go b/vendor/github.com/go-logr/logr/logr.go index c3b56b3d2..e027aea3f 100644 --- a/vendor/github.com/go-logr/logr/logr.go +++ b/vendor/github.com/go-logr/logr/logr.go @@ -21,7 +21,7 @@ limitations under the License. // to back that API. Packages in the Go ecosystem can depend on this package, // while callers can implement logging with whatever backend is appropriate. // -// Usage +// # Usage // // Logging is done using a Logger instance. Logger is a concrete type with // methods, which defers the actual logging to a LogSink interface. The main @@ -30,16 +30,20 @@ limitations under the License. // "structured logging". // // With Go's standard log package, we might write: -// log.Printf("setting target value %s", targetValue) +// +// log.Printf("setting target value %s", targetValue) // // With logr's structured logging, we'd write: -// logger.Info("setting target", "value", targetValue) +// +// logger.Info("setting target", "value", targetValue) // // Errors are much the same. Instead of: -// log.Printf("failed to open the pod bay door for user %s: %v", user, err) +// +// log.Printf("failed to open the pod bay door for user %s: %v", user, err) // // We'd write: -// logger.Error(err, "failed to open the pod bay door", "user", user) +// +// logger.Error(err, "failed to open the pod bay door", "user", user) // // Info() and Error() are very similar, but they are separate methods so that // LogSink implementations can choose to do things like attach additional @@ -47,7 +51,7 @@ limitations under the License. // always logged, regardless of the current verbosity. If there is no error // instance available, passing nil is valid. // -// Verbosity +// # Verbosity // // Often we want to log information only when the application in "verbose // mode". To write log lines that are more verbose, Logger has a V() method. @@ -58,20 +62,22 @@ limitations under the License. // Error messages do not have a verbosity level and are always logged.
// // Where we might have written: -// if flVerbose >= 2 { -// log.Printf("an unusual thing happened") -// } +// +// if flVerbose >= 2 { +// log.Printf("an unusual thing happened") +// } // // We can write: -// logger.V(2).Info("an unusual thing happened") // -// Logger Names +// logger.V(2).Info("an unusual thing happened") +// +// # Logger Names // // Logger instances can have name strings so that all messages logged through // that instance have additional context. For example, you might want to add // a subsystem name: // -// logger.WithName("compactor").Info("started", "time", time.Now()) +// logger.WithName("compactor").Info("started", "time", time.Now()) // // The WithName() method returns a new Logger, which can be passed to // constructors or other functions for further use. Repeated use of WithName() @@ -82,25 +88,27 @@ limitations under the License. // joining operation (e.g. whitespace, commas, periods, slashes, brackets, // quotes, etc). // -// Saved Values +// # Saved Values // // Logger instances can store any number of key/value pairs, which will be // logged alongside all messages logged through that instance. For example, // you might want to create a Logger instance per managed object: // // With the standard log package, we might write: -// log.Printf("decided to set field foo to value %q for object %s/%s", -// targetValue, object.Namespace, object.Name) +// +// log.Printf("decided to set field foo to value %q for object %s/%s", +// targetValue, object.Namespace, object.Name) // // With logr we'd write: -// // Elsewhere: set up the logger to log the object name. -// obj.logger = mainLogger.WithValues( -// "name", obj.name, "namespace", obj.namespace) // -// // later on... -// obj.logger.Info("setting foo", "value", targetValue) +// // Elsewhere: set up the logger to log the object name. +// obj.logger = mainLogger.WithValues( +// "name", obj.name, "namespace", obj.namespace) +// +// // later on... +// obj.logger.Info("setting foo", "value", targetValue) // -// Best Practices +// # Best Practices // // Logger has very few hard rules, with the goal that LogSink implementations // might have a lot of freedom to differentiate. There are, however, some @@ -124,15 +132,15 @@ limitations under the License. // around. For cases where passing a logger is optional, a pointer to Logger // should be used. // -// Key Naming Conventions +// # Key Naming Conventions // // Keys are not strictly required to conform to any specification or regex, but // it is recommended that they: -// * be human-readable and meaningful (not auto-generated or simple ordinals) -// * be constant (not dependent on input data) -// * contain only printable characters -// * not contain whitespace or punctuation -// * use lower case for simple keys and lowerCamelCase for more complex ones +// - be human-readable and meaningful (not auto-generated or simple ordinals) +// - be constant (not dependent on input data) +// - contain only printable characters +// - not contain whitespace or punctuation +// - use lower case for simple keys and lowerCamelCase for more complex ones // // These guidelines help ensure that log data is processed properly regardless // of the log implementation. For example, log implementations will try to @@ -141,51 +149,54 @@ limitations under the License. 
// While users are generally free to use key names of their choice, it's // generally best to avoid using the following keys, as they're frequently used // by implementations: -// * "caller": the calling information (file/line) of a particular log line -// * "error": the underlying error value in the `Error` method -// * "level": the log level -// * "logger": the name of the associated logger -// * "msg": the log message -// * "stacktrace": the stack trace associated with a particular log line or -// error (often from the `Error` message) -// * "ts": the timestamp for a log line +// - "caller": the calling information (file/line) of a particular log line +// - "error": the underlying error value in the `Error` method +// - "level": the log level +// - "logger": the name of the associated logger +// - "msg": the log message +// - "stacktrace": the stack trace associated with a particular log line or +// error (often from the `Error` message) +// - "ts": the timestamp for a log line // // Implementations are encouraged to make use of these keys to represent the // above concepts, when necessary (for example, in a pure-JSON output form, it // would be necessary to represent at least message and timestamp as ordinary // named values). // -// Break Glass +// # Break Glass // // Implementations may choose to give callers access to the underlying // logging implementation. The recommended pattern for this is: -// // Underlier exposes access to the underlying logging implementation. -// // Since callers only have a logr.Logger, they have to know which -// // implementation is in use, so this interface is less of an abstraction -// // and more of way to test type conversion. -// type Underlier interface { -// GetUnderlying() -// } +// +// // Underlier exposes access to the underlying logging implementation. +// // Since callers only have a logr.Logger, they have to know which +// // implementation is in use, so this interface is less of an abstraction +// // and more of way to test type conversion. +// type Underlier interface { +// GetUnderlying() +// } // // Logger grants access to the sink to enable type assertions like this: -// func DoSomethingWithImpl(log logr.Logger) { -// if underlier, ok := log.GetSink()(impl.Underlier) { -// implLogger := underlier.GetUnderlying() -// ... -// } -// } +// +// func DoSomethingWithImpl(log logr.Logger) { +// if underlier, ok := log.GetSink().(impl.Underlier); ok { +// implLogger := underlier.GetUnderlying() +// ... +// } +// } // // Custom `With*` functions can be implemented by copying the complete // Logger struct and replacing the sink in the copy: -// // WithFooBar changes the foobar parameter in the log sink and returns a -// // new logger with that modified sink. It does nothing for loggers where -// // the sink doesn't support that parameter. -// func WithFoobar(log logr.Logger, foobar int) logr.Logger { -// if foobarLogSink, ok := log.GetSink()(FoobarSink); ok { -// log = log.WithSink(foobarLogSink.WithFooBar(foobar)) -// } -// return log -// } +// +// // WithFooBar changes the foobar parameter in the log sink and returns a +// // new logger with that modified sink. It does nothing for loggers where +// // the sink doesn't support that parameter. +// func WithFoobar(log logr.Logger, foobar int) logr.Logger { +// if foobarLogSink, ok := log.GetSink().(FoobarSink); ok { +// log = log.WithSink(foobarLogSink.WithFooBar(foobar)) +// } +// return log +// } // // Don't use New to construct a new Logger with a LogSink retrieved from an // existing Logger. 
Source code attribution might not work correctly and @@ -201,11 +212,14 @@ import ( ) // New returns a new Logger instance. This is primarily used by libraries -// implementing LogSink, rather than end users. +// implementing LogSink, rather than end users. Passing a nil sink will create +// a Logger which discards all log lines. func New(sink LogSink) Logger { logger := Logger{} logger.setSink(sink) - sink.Init(runtimeInfo) + if sink != nil { + sink.Init(runtimeInfo) + } return logger } @@ -244,7 +258,7 @@ type Logger struct { // Enabled tests whether this Logger is enabled. For example, commandline // flags might be used to set the logging verbosity and disable some info logs. func (l Logger) Enabled() bool { - return l.sink.Enabled(l.level) + return l.sink != nil && l.sink.Enabled(l.level) } // Info logs a non-error message with the given key/value pairs as context. @@ -254,6 +268,9 @@ func (l Logger) Enabled() bool { // information. The key/value pairs must alternate string keys and arbitrary // values. func (l Logger) Info(msg string, keysAndValues ...interface{}) { + if l.sink == nil { + return + } if l.Enabled() { if withHelper, ok := l.sink.(CallStackHelperLogSink); ok { withHelper.GetCallStackHelper()() @@ -273,6 +290,9 @@ func (l Logger) Info(msg string, keysAndValues ...interface{}) { // triggered this log line, if present. The err parameter is optional // and nil may be passed instead of an error instance. func (l Logger) Error(err error, msg string, keysAndValues ...interface{}) { + if l.sink == nil { + return + } if withHelper, ok := l.sink.(CallStackHelperLogSink); ok { withHelper.GetCallStackHelper()() } @@ -284,6 +304,9 @@ func (l Logger) Error(err error, msg string, keysAndValues ...interface{}) { // level means a log message is less important. Negative V-levels are treated // as 0. func (l Logger) V(level int) Logger { + if l.sink == nil { + return l + } if level < 0 { level = 0 } @@ -294,6 +317,9 @@ func (l Logger) V(level int) Logger { // WithValues returns a new Logger instance with additional key/value pairs. // See Info for documentation on how key/value pairs work. func (l Logger) WithValues(keysAndValues ...interface{}) Logger { + if l.sink == nil { + return l + } l.setSink(l.sink.WithValues(keysAndValues...)) return l } @@ -304,6 +330,9 @@ func (l Logger) WithValues(keysAndValues ...interface{}) Logger { // contain only letters, digits, and hyphens (see the package documentation for // more information). func (l Logger) WithName(name string) Logger { + if l.sink == nil { + return l + } l.setSink(l.sink.WithName(name)) return l } @@ -324,6 +353,9 @@ func (l Logger) WithName(name string) Logger { // WithCallDepth(1) because it works with implementions that support the // CallDepthLogSink and/or CallStackHelperLogSink interfaces. func (l Logger) WithCallDepth(depth int) Logger { + if l.sink == nil { + return l + } if withCallDepth, ok := l.sink.(CallDepthLogSink); ok { l.setSink(withCallDepth.WithCallDepth(depth)) } @@ -345,6 +377,9 @@ func (l Logger) WithCallDepth(depth int) Logger { // implementation does not support either of these, the original Logger will be // returned. 
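Taken together, the logr hunks above and just below make the zero-valued Logger safe: New accepts a nil sink, every method now short-circuits when the sink is nil, and an IsZero accessor (added below) reports the uninitialized state. A small behavioral sketch, illustrative rather than taken from the vendored tests:

import "github.com/go-logr/logr"

func zeroLoggerIsSafe() {
	var log logr.Logger           // zero value; the sink is nil
	log.Info("dropped")           // no-op now, instead of a nil-pointer panic
	log.V(2).Info("also dropped") // V(), WithValues(), WithName() all guard the nil sink
	if log.IsZero() {
		log = logr.Discard() // Discard() is now simply New(nil)
	}
	log.Error(nil, "still discarded")
}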
func (l Logger) WithCallStackHelper() (func(), Logger) { + if l.sink == nil { + return func() {}, l + } var helper func() if withCallDepth, ok := l.sink.(CallDepthLogSink); ok { l.setSink(withCallDepth.WithCallDepth(1)) @@ -357,6 +392,11 @@ func (l Logger) WithCallStackHelper() (func(), Logger) { return helper, l } +// IsZero returns true if this logger is an uninitialized zero value +func (l Logger) IsZero() bool { + return l.sink == nil +} + // contextKey is how we find Loggers in a context.Context. type contextKey struct{} @@ -442,7 +482,7 @@ type LogSink interface { WithName(name string) LogSink } -// CallDepthLogSink represents a Logger that knows how to climb the call stack +// CallDepthLogSink represents a LogSink that knows how to climb the call stack // to identify the original call site and can offset the depth by a specified // number of frames. This is useful for users who have helper functions // between the "real" call site and the actual calls to Logger methods. @@ -467,7 +507,7 @@ type CallDepthLogSink interface { WithCallDepth(depth int) LogSink } -// CallStackHelperLogSink represents a Logger that knows how to climb +// CallStackHelperLogSink represents a LogSink that knows how to climb // the call stack to identify the original call site and can skip // intermediate helper functions if they mark themselves as // helper. Go's testing package uses that approach. diff --git a/vendor/github.com/go-openapi/swag/util.go b/vendor/github.com/go-openapi/swag/util.go index f78ab684a..d971fbe34 100644 --- a/vendor/github.com/go-openapi/swag/util.go +++ b/vendor/github.com/go-openapi/swag/util.go @@ -341,12 +341,21 @@ type zeroable interface { // IsZero returns true when the value passed into the function is a zero value. // This allows for safer checking of interface values. func IsZero(data interface{}) bool { + v := reflect.ValueOf(data) + // check for nil data + switch v.Kind() { + case reflect.Interface, reflect.Map, reflect.Ptr, reflect.Slice: + if v.IsNil() { + return true + } + } + // check for things that have an IsZero method instead if vv, ok := data.(zeroable); ok { return vv.IsZero() } + // continue with slightly more complex reflection - v := reflect.ValueOf(data) switch v.Kind() { case reflect.String: return v.Len() == 0 @@ -358,14 +367,13 @@ func IsZero(data interface{}) bool { return v.Uint() == 0 case reflect.Float32, reflect.Float64: return v.Float() == 0 - case reflect.Interface, reflect.Map, reflect.Ptr, reflect.Slice: - return v.IsNil() case reflect.Struct, reflect.Array: return reflect.DeepEqual(data, reflect.Zero(v.Type()).Interface()) case reflect.Invalid: return true + default: + return false } - return false } // AddInitialisms add additional initialisms diff --git a/vendor/github.com/google/go-containerregistry/pkg/name/registry.go b/vendor/github.com/google/go-containerregistry/pkg/name/registry.go index 2a26b66d0..5b0d01769 100644 --- a/vendor/github.com/google/go-containerregistry/pkg/name/registry.go +++ b/vendor/github.com/google/go-containerregistry/pkg/name/registry.go @@ -17,6 +17,7 @@ package name import ( "net" "net/url" + "path" "regexp" "strings" ) @@ -50,6 +51,11 @@ func (r Registry) String() string { return r.Name() } +// Repo returns a Repository in the Registry with the given name. +func (r Registry) Repo(repo ...string) Repository { + return Repository{Registry: r, repository: path.Join(repo...)} +} + // Scope returns the scope required to access the registry. 
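The go-openapi/swag hunk above moves the reflect-based nil check ahead of the `zeroable` type assertion, which fixes the classic typed-nil trap: a nil pointer stored in an interface still satisfies `IsZero() bool` via its pointer method set, so the old code asked the nil receiver and got `false`. A minimal sketch of the resulting behavior, using a hypothetical type:

package main

import (
	"fmt"

	"github.com/go-openapi/swag"
)

type widget struct{}

// IsZero satisfies swag's unexported zeroable interface via a pointer receiver.
func (w *widget) IsZero() bool { return false }

func main() {
	var w *widget               // typed nil inside the interface argument
	fmt.Println(swag.IsZero(w)) // true with the hunk above; previously the nil receiver's method returned false
}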
func (r Registry) Scope(string) string { // The only resource under 'registry' is 'catalog'. http://goo.gl/N9cN9Z diff --git a/vendor/github.com/google/pprof/profile/encode.go b/vendor/github.com/google/pprof/profile/encode.go index ab7f03ae2..c8a1beb8a 100644 --- a/vendor/github.com/google/pprof/profile/encode.go +++ b/vendor/github.com/google/pprof/profile/encode.go @@ -17,6 +17,7 @@ package profile import ( "errors" "sort" + "strings" ) func (p *Profile) decoder() []decoder { @@ -183,12 +184,13 @@ var profileDecoder = []decoder{ // repeated Location location = 4 func(b *buffer, m message) error { x := new(Location) - x.Line = make([]Line, 0, 8) // Pre-allocate Line buffer + x.Line = b.tmpLines[:0] // Use shared space temporarily pp := m.(*Profile) pp.Location = append(pp.Location, x) err := decodeMessage(b, x) - var tmp []Line - x.Line = append(tmp, x.Line...) // Shrink to allocated size + b.tmpLines = x.Line[:0] + // Copy to shrink size and detach from shared space. + x.Line = append([]Line(nil), x.Line...) return err }, // repeated Function function = 5 @@ -252,6 +254,14 @@ func (p *Profile) postDecode() error { } else { mappings[m.ID] = m } + + // If this is a main linux kernel mapping with a relocation symbol suffix + // ("[kernel.kallsyms]_text"), extract said suffix. + // It is fairly hacky to handle at this level, but the alternatives appear even worse. + if strings.HasPrefix(m.File, "[kernel.kallsyms]") { + m.KernelRelocationSymbol = strings.ReplaceAll(m.File, "[kernel.kallsyms]", "") + } + } functions := make(map[uint64]*Function, len(p.Function)) @@ -298,41 +308,52 @@ func (p *Profile) postDecode() error { st.Unit, err = getString(p.stringTable, &st.unitX, err) } + // Pre-allocate space for all locations. + numLocations := 0 + for _, s := range p.Sample { + numLocations += len(s.locationIDX) + } + locBuffer := make([]*Location, numLocations) + for _, s := range p.Sample { - labels := make(map[string][]string, len(s.labelX)) - numLabels := make(map[string][]int64, len(s.labelX)) - numUnits := make(map[string][]string, len(s.labelX)) - for _, l := range s.labelX { - var key, value string - key, err = getString(p.stringTable, &l.keyX, err) - if l.strX != 0 { - value, err = getString(p.stringTable, &l.strX, err) - labels[key] = append(labels[key], value) - } else if l.numX != 0 || l.unitX != 0 { - numValues := numLabels[key] - units := numUnits[key] - if l.unitX != 0 { - var unit string - unit, err = getString(p.stringTable, &l.unitX, err) - units = padStringArray(units, len(numValues)) - numUnits[key] = append(units, unit) + if len(s.labelX) > 0 { + labels := make(map[string][]string, len(s.labelX)) + numLabels := make(map[string][]int64, len(s.labelX)) + numUnits := make(map[string][]string, len(s.labelX)) + for _, l := range s.labelX { + var key, value string + key, err = getString(p.stringTable, &l.keyX, err) + if l.strX != 0 { + value, err = getString(p.stringTable, &l.strX, err) + labels[key] = append(labels[key], value) + } else if l.numX != 0 || l.unitX != 0 { + numValues := numLabels[key] + units := numUnits[key] + if l.unitX != 0 { + var unit string + unit, err = getString(p.stringTable, &l.unitX, err) + units = padStringArray(units, len(numValues)) + numUnits[key] = append(units, unit) + } + numLabels[key] = append(numLabels[key], l.numX) } - numLabels[key] = append(numLabels[key], l.numX) } - } - if len(labels) > 0 { - s.Label = labels - } - if len(numLabels) > 0 { - s.NumLabel = numLabels - for key, units := range numUnits { - if len(units) > 0 { - numUnits[key] =
padStringArray(units, len(numLabels[key])) + if len(labels) > 0 { + s.Label = labels + } + if len(numLabels) > 0 { + s.NumLabel = numLabels + for key, units := range numUnits { + if len(units) > 0 { + numUnits[key] = padStringArray(units, len(numLabels[key])) + } } + s.NumUnit = numUnits } - s.NumUnit = numUnits } - s.Location = make([]*Location, len(s.locationIDX)) + + s.Location = locBuffer[:len(s.locationIDX)] + locBuffer = locBuffer[len(s.locationIDX):] for i, lid := range s.locationIDX { if lid < uint64(len(locationIds)) { s.Location[i] = locationIds[lid] diff --git a/vendor/github.com/google/pprof/profile/filter.go b/vendor/github.com/google/pprof/profile/filter.go index ea8e66c68..c794b9390 100644 --- a/vendor/github.com/google/pprof/profile/filter.go +++ b/vendor/github.com/google/pprof/profile/filter.go @@ -22,6 +22,10 @@ import "regexp" // samples where at least one frame matches focus but none match ignore. // Returns true if the corresponding regexp matched at least one sample. func (p *Profile) FilterSamplesByName(focus, ignore, hide, show *regexp.Regexp) (fm, im, hm, hnm bool) { + if focus == nil && ignore == nil && hide == nil && show == nil { + fm = true // Missing focus implies a match + return + } focusOrIgnore := make(map[uint64]bool) hidden := make(map[uint64]bool) for _, l := range p.Location { diff --git a/vendor/github.com/google/pprof/profile/legacy_profile.go b/vendor/github.com/google/pprof/profile/legacy_profile.go index 0c8f3bb5b..8d07fd6c2 100644 --- a/vendor/github.com/google/pprof/profile/legacy_profile.go +++ b/vendor/github.com/google/pprof/profile/legacy_profile.go @@ -295,11 +295,12 @@ func get64b(b []byte) (uint64, []byte) { // // The general format for profilez samples is a sequence of words in // binary format. The first words are a header with the following data: -// 1st word -- 0 -// 2nd word -- 3 -// 3rd word -- 0 if a c++ application, 1 if a java application. -// 4th word -- Sampling period (in microseconds). -// 5th word -- Padding. +// +// 1st word -- 0 +// 2nd word -- 3 +// 3rd word -- 0 if a c++ application, 1 if a java application. +// 4th word -- Sampling period (in microseconds). +// 5th word -- Padding. func parseCPU(b []byte) (*Profile, error) { var parse func([]byte) (uint64, []byte) var n1, n2, n3, n4, n5 uint64 @@ -403,15 +404,18 @@ func cleanupDuplicateLocations(p *Profile) { // // profilez samples are a repeated sequence of stack frames of the // form: -// 1st word -- The number of times this stack was encountered. -// 2nd word -- The size of the stack (StackSize). -// 3rd word -- The first address on the stack. -// ... -// StackSize + 2 -- The last address on the stack +// +// 1st word -- The number of times this stack was encountered. +// 2nd word -- The size of the stack (StackSize). +// 3rd word -- The first address on the stack. +// ... +// StackSize + 2 -- The last address on the stack +// // The last stack trace is of the form: -// 1st word -- 0 -// 2nd word -- 1 -// 3rd word -- 0 +// +// 1st word -- 0 +// 2nd word -- 1 +// 3rd word -- 0 // // Addresses from stack traces may point to the next instruction after // each call. Optionally adjust by -1 to land somewhere on the actual @@ -861,7 +865,6 @@ func parseThread(b []byte) (*Profile, error) { // Recognize each thread and populate profile samples.
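To make the new early return in FilterSamplesByName concrete, a minimal sketch (illustrative; the helper name and package layout are made up):

package example

import "github.com/google/pprof/profile"

// keepEverything exercises the fast path added above: with all four
// filters nil, the call reports a focus match (fm=true) and returns
// without walking any locations or dropping any samples.
func keepEverything(p *profile.Profile) bool {
	fm, _, _, _ := p.FilterSamplesByName(nil, nil, nil, nil)
	return fm
}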
for !isMemoryMapSentinel(line) { if strings.HasPrefix(line, "---- no stack trace for") { - line = "" break } if t := threadStartRE.FindStringSubmatch(line); len(t) != 4 { diff --git a/vendor/github.com/google/pprof/profile/merge.go b/vendor/github.com/google/pprof/profile/merge.go index 9978e7330..4b66282cb 100644 --- a/vendor/github.com/google/pprof/profile/merge.go +++ b/vendor/github.com/google/pprof/profile/merge.go @@ -15,6 +15,7 @@ package profile import ( + "encoding/binary" "fmt" "sort" "strconv" @@ -58,7 +59,7 @@ func Merge(srcs []*Profile) (*Profile, error) { for _, src := range srcs { // Clear the profile-specific hash tables - pm.locationsByID = make(map[uint64]*Location, len(src.Location)) + pm.locationsByID = makeLocationIDMap(len(src.Location)) pm.functionsByID = make(map[uint64]*Function, len(src.Function)) pm.mappingsByID = make(map[uint64]mapInfo, len(src.Mapping)) @@ -136,7 +137,7 @@ type profileMerger struct { p *Profile // Memoization tables within a profile. - locationsByID map[uint64]*Location + locationsByID locationIDMap functionsByID map[uint64]*Function mappingsByID map[uint64]mapInfo @@ -153,6 +154,16 @@ type mapInfo struct { } func (pm *profileMerger) mapSample(src *Sample) *Sample { + // Check memoization table + k := pm.sampleKey(src) + if ss, ok := pm.samples[k]; ok { + for i, v := range src.Value { + ss.Value[i] += v + } + return ss + } + + // Make new sample. s := &Sample{ Location: make([]*Location, len(src.Location)), Value: make([]int64, len(src.Value)), @@ -177,52 +188,98 @@ func (pm *profileMerger) mapSample(src *Sample) *Sample { s.NumLabel[k] = vv s.NumUnit[k] = uu } - // Check memoization table. Must be done on the remapped location to - // account for the remapped mapping. Add current values to the - // existing sample. - k := s.key() - if ss, ok := pm.samples[k]; ok { - for i, v := range src.Value { - ss.Value[i] += v - } - return ss - } copy(s.Value, src.Value) pm.samples[k] = s pm.p.Sample = append(pm.p.Sample, s) return s } -// key generates sampleKey to be used as a key for maps. -func (sample *Sample) key() sampleKey { - ids := make([]string, len(sample.Location)) - for i, l := range sample.Location { - ids[i] = strconv.FormatUint(l.ID, 16) +func (pm *profileMerger) sampleKey(sample *Sample) sampleKey { + // Accumulate contents into a string. + var buf strings.Builder + buf.Grow(64) // Heuristic to avoid extra allocs + + // encode a number + putNumber := func(v uint64) { + var num [binary.MaxVarintLen64]byte + n := binary.PutUvarint(num[:], v) + buf.Write(num[:n]) + } + + // encode a string prefixed with its length. + putDelimitedString := func(s string) { + putNumber(uint64(len(s))) + buf.WriteString(s) + } + + for _, l := range sample.Location { + // Get the location in the merged profile, which may have a different ID. 
+ if loc := pm.mapLocation(l); loc != nil { + putNumber(loc.ID) + } } + putNumber(0) // Delimiter - labels := make([]string, 0, len(sample.Label)) - for k, v := range sample.Label { - labels = append(labels, fmt.Sprintf("%q%q", k, v)) + for _, l := range sortedKeys1(sample.Label) { + putDelimitedString(l) + values := sample.Label[l] + putNumber(uint64(len(values))) + for _, v := range values { + putDelimitedString(v) + } } - sort.Strings(labels) - numlabels := make([]string, 0, len(sample.NumLabel)) - for k, v := range sample.NumLabel { - numlabels = append(numlabels, fmt.Sprintf("%q%x%x", k, v, sample.NumUnit[k])) + for _, l := range sortedKeys2(sample.NumLabel) { + putDelimitedString(l) + values := sample.NumLabel[l] + putNumber(uint64(len(values))) + for _, v := range values { + putNumber(uint64(v)) + } + units := sample.NumUnit[l] + putNumber(uint64(len(units))) + for _, v := range units { + putDelimitedString(v) + } } - sort.Strings(numlabels) - return sampleKey{ - strings.Join(ids, "|"), - strings.Join(labels, ""), - strings.Join(numlabels, ""), + return sampleKey(buf.String()) +} + +type sampleKey string + +// sortedKeys1 returns the sorted keys found in a string->[]string map. +// +// Note: this is currently non-generic since github pprof runs golint, +// which does not support generics. When that issue is fixed, it can +// be merged with sortedKeys2 and made into a generic function. +func sortedKeys1(m map[string][]string) []string { + if len(m) == 0 { + return nil } + keys := make([]string, 0, len(m)) + for k := range m { + keys = append(keys, k) + } + sort.Strings(keys) + return keys } -type sampleKey struct { - locations string - labels string - numlabels string +// sortedKeys2 returns the sorted keys found in a string->[]int64 map. +// +// Note: this is currently non-generic since github pprof runs golint, +// which does not support generics. When that issue is fixed, it can +// be merged with sortedKeys1 and made into a generic function. +func sortedKeys2(m map[string][]int64) []string { + if len(m) == 0 { + return nil + } + keys := make([]string, 0, len(m)) + for k := range m { + keys = append(keys, k) + } + sort.Strings(keys) + return keys } func (pm *profileMerger) mapLocation(src *Location) *Location { @@ -230,7 +287,7 @@ func (pm *profileMerger) mapLocation(src *Location) *Location { return nil } - if l, ok := pm.locationsByID[src.ID]; ok { + if l := pm.locationsByID.get(src.ID); l != nil { return l } @@ -249,10 +306,10 @@ func (pm *profileMerger) mapLocation(src *Location) *Location { // account for the remapped mapping ID. 
k := l.key() if ll, ok := pm.locations[k]; ok { - pm.locationsByID[src.ID] = ll + pm.locationsByID.set(src.ID, ll) return ll } - pm.locationsByID[src.ID] = l + pm.locationsByID.set(src.ID, l) pm.locations[k] = l pm.p.Location = append(pm.p.Location, l) return l @@ -303,16 +360,17 @@ func (pm *profileMerger) mapMapping(src *Mapping) mapInfo { return mi } m := &Mapping{ - ID: uint64(len(pm.p.Mapping) + 1), - Start: src.Start, - Limit: src.Limit, - Offset: src.Offset, - File: src.File, - BuildID: src.BuildID, - HasFunctions: src.HasFunctions, - HasFilenames: src.HasFilenames, - HasLineNumbers: src.HasLineNumbers, - HasInlineFrames: src.HasInlineFrames, + ID: uint64(len(pm.p.Mapping) + 1), + Start: src.Start, + Limit: src.Limit, + Offset: src.Offset, + File: src.File, + KernelRelocationSymbol: src.KernelRelocationSymbol, + BuildID: src.BuildID, + HasFunctions: src.HasFunctions, + HasFilenames: src.HasFilenames, + HasLineNumbers: src.HasLineNumbers, + HasInlineFrames: src.HasInlineFrames, } pm.p.Mapping = append(pm.p.Mapping, m) @@ -479,3 +537,131 @@ func (p *Profile) compatible(pb *Profile) error { func equalValueType(st1, st2 *ValueType) bool { return st1.Type == st2.Type && st1.Unit == st2.Unit } + +// locationIDMap is like a map[uint64]*Location, but provides efficiency for +// ids that are densely numbered, which is often the case. +type locationIDMap struct { + dense []*Location // indexed by id for id < len(dense) + sparse map[uint64]*Location // indexed by id for id >= len(dense) +} + +func makeLocationIDMap(n int) locationIDMap { + return locationIDMap{ + dense: make([]*Location, n), + sparse: map[uint64]*Location{}, + } +} + +func (lm locationIDMap) get(id uint64) *Location { + if id < uint64(len(lm.dense)) { + return lm.dense[int(id)] + } + return lm.sparse[id] +} + +func (lm locationIDMap) set(id uint64, loc *Location) { + if id < uint64(len(lm.dense)) { + lm.dense[id] = loc + return + } + lm.sparse[id] = loc +} + +// CompatibilizeSampleTypes makes profiles compatible to be compared/merged. It +// keeps sample types that appear in all profiles only and drops/reorders the +// sample types as necessary. +// +// If the sample type order is not the same across the given profiles, the +// order is derived from the first profile. +// +// Profiles are modified in-place. +// +// It returns an error if the sample type's intersection is empty. +func CompatibilizeSampleTypes(ps []*Profile) error { + sTypes := commonSampleTypes(ps) + if len(sTypes) == 0 { + return fmt.Errorf("profiles have empty common sample type list") + } + for _, p := range ps { + if err := compatibilizeSampleTypes(p, sTypes); err != nil { + return err + } + } + return nil +} + +// commonSampleTypes returns sample types that appear in all profiles in the +// order in which they appear in the first profile. +func commonSampleTypes(ps []*Profile) []string { + if len(ps) == 0 { + return nil } + sTypes := map[string]int{} + for _, p := range ps { + for _, st := range p.SampleType { + sTypes[st.Type]++ + } + } + var res []string + for _, st := range ps[0].SampleType { + if sTypes[st.Type] == len(ps) { + res = append(res, st.Type) + } + } + return res } + +// compatibilizeSampleTypes drops sample types that are not present in the sTypes +// list and reorders them if needed. +// +// It sets DefaultSampleType to sTypes[0] if it is not in the sTypes list. +// +// It assumes that all sample types from the sTypes list are present in the +// given profile; otherwise it returns an error.
+func compatibilizeSampleTypes(p *Profile, sTypes []string) error { + if len(sTypes) == 0 { + return fmt.Errorf("sample type list is empty") + } + defaultSampleType := sTypes[0] + reMap, needToModify := make([]int, len(sTypes)), false + for i, st := range sTypes { + if st == p.DefaultSampleType { + defaultSampleType = p.DefaultSampleType + } + idx := searchValueType(p.SampleType, st) + if idx < 0 { + return fmt.Errorf("%q sample type is not found in profile", st) + } + reMap[i] = idx + if idx != i { + needToModify = true + } + } + if !needToModify && len(sTypes) == len(p.SampleType) { + return nil + } + p.DefaultSampleType = defaultSampleType + oldSampleTypes := p.SampleType + p.SampleType = make([]*ValueType, len(sTypes)) + for i, idx := range reMap { + p.SampleType[i] = oldSampleTypes[idx] + } + values := make([]int64, len(sTypes)) + for _, s := range p.Sample { + for i, idx := range reMap { + values[i] = s.Value[idx] + } + s.Value = s.Value[:len(values)] + copy(s.Value, values) + } + return nil +} + +func searchValueType(vts []*ValueType, s string) int { + for i, vt := range vts { + if vt.Type == s { + return i + } + } + return -1 +} diff --git a/vendor/github.com/google/pprof/profile/profile.go b/vendor/github.com/google/pprof/profile/profile.go index 2590c8ddb..60ef7e926 100644 --- a/vendor/github.com/google/pprof/profile/profile.go +++ b/vendor/github.com/google/pprof/profile/profile.go @@ -21,7 +21,6 @@ import ( "compress/gzip" "fmt" "io" - "io/ioutil" "math" "path/filepath" "regexp" @@ -73,9 +72,23 @@ type ValueType struct { type Sample struct { Location []*Location Value []int64 - Label map[string][]string + // Label is a per-label-key map to values for string labels. + // + // In general, having multiple values for the given label key is strongly + // discouraged - see docs for the sample label field in profile.proto. The + // main reason this unlikely state is tracked here is to make the + // decoding->encoding roundtrip not lossy. But we expect that the value + // slices present in this map are always of length 1. + Label map[string][]string + // NumLabel is a per-label-key map to values for numeric labels. See a note + // above on handling multiple values for a label. NumLabel map[string][]int64 - NumUnit map[string][]string + // NumUnit is a per-label-key map to the unit names of corresponding numeric + // label values. The unit info may be missing even if the label is in + // NumLabel, see the docs in profile.proto for details. When the value + // slice is present and not nil, its length must be equal to the length of + // the corresponding value slice in NumLabel. + NumUnit map[string][]string locationIDX []uint64 labelX []label @@ -106,6 +119,15 @@ type Mapping struct { fileX int64 buildIDX int64 + + // Name of the kernel relocation symbol ("_text" or "_stext"), extracted from File. + // For linux kernel mappings generated by some tools, correct symbolization depends + // on knowing which of the two possible relocation symbols was used for `Start`. + // This is given to us as a suffix in `File` (e.g. "[kernel.kallsyms]_stext"). + // + // Note, this public field is not persisted in the proto. For the purposes of + // copying / merging / hashing profiles, it is considered subsumed by `File`. + KernelRelocationSymbol string } // Location corresponds to Profile.Location @@ -144,7 +166,7 @@ type Function struct { // may be a gzip-compressed encoded protobuf or one of many legacy // profile formats which may be unsupported in the future.
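An end-to-end sketch of the new CompatibilizeSampleTypes helper (illustrative; the wrapper function and package name are assumptions):

package example

import "github.com/google/pprof/profile"

// mergeCompatible drops/reorders sample types so all profiles share a
// common set (ordered per the first profile), then merges them.
func mergeCompatible(ps []*profile.Profile) (*profile.Profile, error) {
	if err := profile.CompatibilizeSampleTypes(ps); err != nil {
		return nil, err // empty sample-type intersection
	}
	return profile.Merge(ps)
}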
func Parse(r io.Reader) (*Profile, error) { - data, err := ioutil.ReadAll(r) + data, err := io.ReadAll(r) if err != nil { return nil, err } @@ -159,7 +181,7 @@ func ParseData(data []byte) (*Profile, error) { if len(data) >= 2 && data[0] == 0x1f && data[1] == 0x8b { gz, err := gzip.NewReader(bytes.NewBuffer(data)) if err == nil { - data, err = ioutil.ReadAll(gz) + data, err = io.ReadAll(gz) } if err != nil { return nil, fmt.Errorf("decompressing profile: %v", err) @@ -707,6 +729,35 @@ func (s *Sample) HasLabel(key, value string) bool { return false } +// SetNumLabel sets the specified key to the specified value for all samples in the +// profile. "unit" is a slice that describes the units that each corresponding member +// of "values" is measured in (e.g. bytes or seconds). If there is no relevant +// unit for a given value, that member of "unit" should be the empty string. +// "unit" must either have the same length as "value", or be nil. +func (p *Profile) SetNumLabel(key string, value []int64, unit []string) { + for _, sample := range p.Sample { + if sample.NumLabel == nil { + sample.NumLabel = map[string][]int64{key: value} + } else { + sample.NumLabel[key] = value + } + if sample.NumUnit == nil { + sample.NumUnit = map[string][]string{key: unit} + } else { + sample.NumUnit[key] = unit + } + } +} + +// RemoveNumLabel removes all numerical labels associated with the specified key for all +// samples in the profile. +func (p *Profile) RemoveNumLabel(key string) { + for _, sample := range p.Sample { + delete(sample.NumLabel, key) + delete(sample.NumUnit, key) + } +} + // DiffBaseSample returns true if a sample belongs to the diff base and false // otherwise. func (s *Sample) DiffBaseSample() bool { diff --git a/vendor/github.com/google/pprof/profile/proto.go b/vendor/github.com/google/pprof/profile/proto.go index 539ad3ab3..a15696ba1 100644 --- a/vendor/github.com/google/pprof/profile/proto.go +++ b/vendor/github.com/google/pprof/profile/proto.go @@ -39,11 +39,12 @@ import ( ) type buffer struct { - field int // field tag - typ int // proto wire type code for field - u64 uint64 - data []byte - tmp [16]byte + field int // field tag + typ int // proto wire type code for field + u64 uint64 + data []byte + tmp [16]byte + tmpLines []Line // temporary storage used while decoding "repeated Line". } type decoder func(*buffer, message) error @@ -286,7 +287,6 @@ func decodeInt64s(b *buffer, x *[]int64) error { if b.typ == 2 { // Packed encoding data := b.data - tmp := make([]int64, 0, len(data)) // Maximally sized for len(data) > 0 { var u uint64 var err error @@ -294,9 +294,8 @@ func decodeInt64s(b *buffer, x *[]int64) error { if u, data, err = decodeVarint(data); err != nil { return err } - tmp = append(tmp, int64(u)) + *x = append(*x, int64(u)) } - *x = append(*x, tmp...) return nil } var i int64 @@ -319,7 +318,6 @@ func decodeUint64s(b *buffer, x *[]uint64) error { if b.typ == 2 { data := b.data // Packed encoding - tmp := make([]uint64, 0, len(data)) // Maximally sized for len(data) > 0 { var u uint64 var err error @@ -327,9 +325,8 @@ func decodeUint64s(b *buffer, x *[]uint64) error { if u, data, err = decodeVarint(data); err != nil { return err } - tmp = append(tmp, u) + *x = append(*x, u) } - *x = append(*x, tmp...) 
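A short, hedged sketch of the SetNumLabel/RemoveNumLabel helpers added above (the label key, values, and package name are made up):

package example

import "github.com/google/pprof/profile"

func tagAndUntag(p *profile.Profile) {
	// Attach a numeric label to every sample; the unit slice may be nil,
	// otherwise it must have the same length as the value slice.
	p.SetNumLabel("alloc_size", []int64{4096}, []string{"bytes"})

	// Strip the label (and its units) from all samples again.
	p.RemoveNumLabel("alloc_size")
}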
return nil } var u uint64 diff --git a/vendor/github.com/google/pprof/profile/prune.go b/vendor/github.com/google/pprof/profile/prune.go index 02d21a818..b2f9fd546 100644 --- a/vendor/github.com/google/pprof/profile/prune.go +++ b/vendor/github.com/google/pprof/profile/prune.go @@ -62,15 +62,31 @@ func (p *Profile) Prune(dropRx, keepRx *regexp.Regexp) { prune := make(map[uint64]bool) pruneBeneath := make(map[uint64]bool) + // simplifyFunc can be expensive, so cache results. + // Note that the same function name can be encountered many times due to + // different lines and addresses in the same function. + pruneCache := map[string]bool{} // Map from function to whether or not to prune + pruneFromHere := func(s string) bool { + if r, ok := pruneCache[s]; ok { + return r + } + funcName := simplifyFunc(s) + if dropRx.MatchString(funcName) { + if keepRx == nil || !keepRx.MatchString(funcName) { + pruneCache[s] = true + return true + } + } + pruneCache[s] = false + return false + } + for _, loc := range p.Location { var i int for i = len(loc.Line) - 1; i >= 0; i-- { if fn := loc.Line[i].Function; fn != nil && fn.Name != "" { - funcName := simplifyFunc(fn.Name) - if dropRx.MatchString(funcName) { - if keepRx == nil || !keepRx.MatchString(funcName) { - break - } + if pruneFromHere(fn.Name) { + break } } } } diff --git a/vendor/github.com/klauspost/compress/README.md b/vendor/github.com/klauspost/compress/README.md index 55c8ca447..f710a34ec 100644 --- a/vendor/github.com/klauspost/compress/README.md +++ b/vendor/github.com/klauspost/compress/README.md @@ -16,6 +16,19 @@ This package provides various compression algorithms. # changelog +* Apr 16, 2023 - [v1.16.5](https://github.com/klauspost/compress/releases/tag/v1.16.5) + * zstd: readByte needs to use io.ReadFull by @jnoxon in https://github.com/klauspost/compress/pull/802 + * gzip: Fix WriterTo after initial read https://github.com/klauspost/compress/pull/804 + +* Apr 5, 2023 - [v1.16.4](https://github.com/klauspost/compress/releases/tag/v1.16.4) + * zstd: Improve zstd best efficiency by @greatroar and @klauspost in https://github.com/klauspost/compress/pull/784 + * zstd: Respect WithAllLitEntropyCompression https://github.com/klauspost/compress/pull/792 + * zstd: Fix amd64 not always detecting corrupt data https://github.com/klauspost/compress/pull/785 + * zstd: Various minor improvements by @greatroar in https://github.com/klauspost/compress/pull/788 https://github.com/klauspost/compress/pull/794 https://github.com/klauspost/compress/pull/795 + * s2: Fix huge block overflow https://github.com/klauspost/compress/pull/779 + * s2: Allow CustomEncoder fallback https://github.com/klauspost/compress/pull/780 + * gzhttp: Support ResponseWriter Unwrap() in gzhttp handler by @jgimenez in https://github.com/klauspost/compress/pull/799 + * Mar 13, 2023 - [v1.16.1](https://github.com/klauspost/compress/releases/tag/v1.16.1) * zstd: Speed up + improve best encoder by @greatroar in https://github.com/klauspost/compress/pull/776 * gzhttp: Add optional [BREACH mitigation](https://github.com/klauspost/compress/tree/master/gzhttp#breach-mitigation).
https://github.com/klauspost/compress/pull/762 https://github.com/klauspost/compress/pull/768 https://github.com/klauspost/compress/pull/769 https://github.com/klauspost/compress/pull/770 https://github.com/klauspost/compress/pull/767 diff --git a/vendor/github.com/klauspost/compress/SECURITY.md b/vendor/github.com/klauspost/compress/SECURITY.md new file mode 100644 index 000000000..23a43387b --- /dev/null +++ b/vendor/github.com/klauspost/compress/SECURITY.md @@ -0,0 +1,25 @@ +# Security Policy + +## Supported Versions + +Security updates are applied only to the latest release. + +## Vulnerability Definition + +A security vulnerability is a bug that, with certain input, triggers a crash or an infinite loop. Most calls will have varying execution time and only in rare cases will slow operation be considered a security vulnerability. + +Corrupted output generally is not considered a security vulnerability, unless independent operations are able to affect each other. Note that not all functionality is re-entrant and safe to use concurrently. + +Out-of-memory crashes only apply if the en/decoder uses an abnormal amount of memory, with appropriate options applied to limit maximum window size, concurrency, etc. However, if you are in doubt you are welcome to file a security issue. + +It is assumed that all callers are trusted, meaning internal data exposed through reflection or inspection of returned data structures is not considered a vulnerability. + +Vulnerabilities resulting from compiler/assembler errors should be reported upstream. Depending on the severity this package may or may not implement a workaround. + +## Reporting a Vulnerability + +If you have discovered a security vulnerability in this project, please report it privately. **Do not disclose it as a public issue.** This gives us time to work with you to fix the issue before public exposure, reducing the chance that the exploit will be used before a patch is released. + +Please disclose it at [security advisory](https://github.com/klaupost/compress/security/advisories/new). If possible please provide a minimal reproducer. If the issue only applies to a single platform, it would be helpful to provide access to that. + +This project is maintained by a team of volunteers on a reasonable-effort basis. As such, vulnerabilities will be disclosed on a best-effort basis. diff --git a/vendor/github.com/klauspost/compress/flate/deflate.go b/vendor/github.com/klauspost/compress/flate/deflate.go index 82882961a..5faea0b2b 100644 --- a/vendor/github.com/klauspost/compress/flate/deflate.go +++ b/vendor/github.com/klauspost/compress/flate/deflate.go @@ -90,9 +90,8 @@ type advancedState struct { ii uint16 // position of last match, intended to overflow to reset. // input window: unprocessed data is window[index:windowEnd] - index int - estBitsPerByte int - hashMatch [maxMatchLength + minMatchLength]uint32 + index int + hashMatch [maxMatchLength + minMatchLength]uint32 // Input hash chains // hashHead[hashValue] contains the largest inputIndex with the specified hash value diff --git a/vendor/github.com/klauspost/compress/flate/huffman_bit_writer.go b/vendor/github.com/klauspost/compress/flate/huffman_bit_writer.go index 89a5dd89f..f70594c34 100644 --- a/vendor/github.com/klauspost/compress/flate/huffman_bit_writer.go +++ b/vendor/github.com/klauspost/compress/flate/huffman_bit_writer.go @@ -34,11 +34,6 @@ const ( // Should preferably be a multiple of 6, since // we accumulate 6 bytes between writes to the buffer.
bufferFlushSize = 246 - - // bufferSize is the actual output byte buffer size. - // It must have additional headroom for a flush - // which can contain up to 8 bytes. - bufferSize = bufferFlushSize + 8 ) // Minimum length code that emits bits. diff --git a/vendor/github.com/klauspost/compress/flate/huffman_sortByFreq.go b/vendor/github.com/klauspost/compress/flate/huffman_sortByFreq.go index 207780299..6c05ba8c1 100644 --- a/vendor/github.com/klauspost/compress/flate/huffman_sortByFreq.go +++ b/vendor/github.com/klauspost/compress/flate/huffman_sortByFreq.go @@ -42,25 +42,6 @@ func quickSortByFreq(data []literalNode, a, b, maxDepth int) { } } -// siftDownByFreq implements the heap property on data[lo, hi). -// first is an offset into the array where the root of the heap lies. -func siftDownByFreq(data []literalNode, lo, hi, first int) { - root := lo - for { - child := 2*root + 1 - if child >= hi { - break - } - if child+1 < hi && (data[first+child].freq == data[first+child+1].freq && data[first+child].literal < data[first+child+1].literal || data[first+child].freq < data[first+child+1].freq) { - child++ - } - if data[first+root].freq == data[first+child].freq && data[first+root].literal > data[first+child].literal || data[first+root].freq > data[first+child].freq { - return - } - data[first+root], data[first+child] = data[first+child], data[first+root] - root = child - } -} func doPivotByFreq(data []literalNode, lo, hi int) (midlo, midhi int) { m := int(uint(lo+hi) >> 1) // Written like this to avoid integer overflow. if hi-lo > 40 { diff --git a/vendor/github.com/klauspost/compress/huff0/bitwriter.go b/vendor/github.com/klauspost/compress/huff0/bitwriter.go index aed2347ce..b4d7164e3 100644 --- a/vendor/github.com/klauspost/compress/huff0/bitwriter.go +++ b/vendor/github.com/klauspost/compress/huff0/bitwriter.go @@ -13,14 +13,6 @@ type bitWriter struct { out []byte } -// bitMask16 is bitmasks. Has extra to avoid bounds check. -var bitMask16 = [32]uint16{ - 0, 1, 3, 7, 0xF, 0x1F, - 0x3F, 0x7F, 0xFF, 0x1FF, 0x3FF, 0x7FF, - 0xFFF, 0x1FFF, 0x3FFF, 0x7FFF, 0xFFFF, 0xFFFF, - 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, - 0xFFFF, 0xFFFF} /* up to 16 bits */ - // addBits16Clean will add up to 16 bits. value may not contain more set bits than indicated. // It will not check if there is space for them, so the caller must ensure that it has flushed recently. func (b *bitWriter) addBits16Clean(value uint16, bits uint8) { diff --git a/vendor/github.com/klauspost/compress/huff0/decompress.go b/vendor/github.com/klauspost/compress/huff0/decompress.go index 3c0b398c7..54bd08b25 100644 --- a/vendor/github.com/klauspost/compress/huff0/decompress.go +++ b/vendor/github.com/klauspost/compress/huff0/decompress.go @@ -253,7 +253,7 @@ func (d *Decoder) decompress1X8Bit(dst, src []byte) ([]byte, error) { switch d.actualTableLog { case 8: - const shift = 8 - 8 + const shift = 0 for br.off >= 4 { br.fillFast() v := dt[uint8(br.value>>(56+shift))] diff --git a/vendor/github.com/klauspost/compress/internal/snapref/encode_other.go b/vendor/github.com/klauspost/compress/internal/snapref/encode_other.go index 05db94d39..2aa6a95a0 100644 --- a/vendor/github.com/klauspost/compress/internal/snapref/encode_other.go +++ b/vendor/github.com/klauspost/compress/internal/snapref/encode_other.go @@ -87,18 +87,6 @@ func emitCopy(dst []byte, offset, length int) int { return i + 2 } -// extendMatch returns the largest k such that k <= len(src) and that -// src[i:i+k-j] and src[j:k] have the same contents. 
-// -// It assumes that: -// -// 0 <= i && i < j && j <= len(src) -func extendMatch(src []byte, i, j int) int { - for ; j < len(src) && src[i] == src[j]; i, j = i+1, j+1 { - } - return j -} - func hash(u, shift uint32) uint32 { return (u * 0x1e35a7bd) >> shift } diff --git a/vendor/github.com/klauspost/compress/zstd/README.md b/vendor/github.com/klauspost/compress/zstd/README.md index 65b38abed..bdd49c8b2 100644 --- a/vendor/github.com/klauspost/compress/zstd/README.md +++ b/vendor/github.com/klauspost/compress/zstd/README.md @@ -304,7 +304,7 @@ import "github.com/klauspost/compress/zstd" // Create a reader that caches decompressors. // For this operation type we supply a nil Reader. -var decoder, _ = zstd.NewReader(nil, WithDecoderConcurrency(0)) +var decoder, _ = zstd.NewReader(nil, zstd.WithDecoderConcurrency(0)) // Decompress a buffer. We don't supply a destination buffer, // so it will be allocated by the decoder. diff --git a/vendor/github.com/klauspost/compress/zstd/blockdec.go b/vendor/github.com/klauspost/compress/zstd/blockdec.go index 5f272d87f..9f17ce601 100644 --- a/vendor/github.com/klauspost/compress/zstd/blockdec.go +++ b/vendor/github.com/klauspost/compress/zstd/blockdec.go @@ -592,7 +592,7 @@ func (b *blockDec) prepareSequences(in []byte, hist *history) (err error) { } seq.fse.setRLE(symb) if debugDecoder { - printf("RLE set to %+v, code: %v", symb, v) + printf("RLE set to 0x%x, code: %v", symb, v) } case compModeFSE: println("Reading table for", tableIndex(i)) diff --git a/vendor/github.com/klauspost/compress/zstd/bytebuf.go b/vendor/github.com/klauspost/compress/zstd/bytebuf.go index 512ffe5b9..55a388553 100644 --- a/vendor/github.com/klauspost/compress/zstd/bytebuf.go +++ b/vendor/github.com/klauspost/compress/zstd/bytebuf.go @@ -109,7 +109,7 @@ func (r *readerWrapper) readBig(n int, dst []byte) ([]byte, error) { } func (r *readerWrapper) readByte() (byte, error) { - n2, err := r.r.Read(r.tmp[:1]) + n2, err := io.ReadFull(r.r, r.tmp[:1]) if err != nil { if err == io.EOF { err = io.ErrUnexpectedEOF diff --git a/vendor/github.com/klauspost/compress/zstd/decoder_options.go b/vendor/github.com/klauspost/compress/zstd/decoder_options.go index 07a90dd7a..774c5f00f 100644 --- a/vendor/github.com/klauspost/compress/zstd/decoder_options.go +++ b/vendor/github.com/klauspost/compress/zstd/decoder_options.go @@ -107,7 +107,7 @@ func WithDecoderDicts(dicts ...[]byte) DOption { } } -// WithEncoderDictRaw registers a dictionary that may be used by the decoder. +// WithDecoderDictRaw registers a dictionary that may be used by the decoder. // The slice content can be arbitrary data. func WithDecoderDictRaw(id uint32, content []byte) DOption { return func(o *decoderOptions) error { diff --git a/vendor/github.com/klauspost/compress/zstd/enc_fast.go b/vendor/github.com/klauspost/compress/zstd/enc_fast.go index 315b1a8f2..cbc626eec 100644 --- a/vendor/github.com/klauspost/compress/zstd/enc_fast.go +++ b/vendor/github.com/klauspost/compress/zstd/enc_fast.go @@ -133,8 +133,7 @@ encodeLoop: if canRepeat && repIndex >= 0 && load3232(src, repIndex) == uint32(cv>>16) { // Consider history as well. var seq seq - var length int32 - length = 4 + e.matchlen(s+6, repIndex+4, src) + length := 4 + e.matchlen(s+6, repIndex+4, src) seq.matchLen = uint32(length - zstdMinMatch) // We might be able to match backwards. @@ -645,8 +644,7 @@ encodeLoop: if canRepeat && repIndex >= 0 && load3232(src, repIndex) == uint32(cv>>16) { // Consider history as well. 
var seq seq - var length int32 - length = 4 + e.matchlen(s+6, repIndex+4, src) + length := 4 + e.matchlen(s+6, repIndex+4, src) seq.matchLen = uint32(length - zstdMinMatch) diff --git a/vendor/github.com/klauspost/compress/zstd/encoder_options.go b/vendor/github.com/klauspost/compress/zstd/encoder_options.go index 50f70533b..faaf81921 100644 --- a/vendor/github.com/klauspost/compress/zstd/encoder_options.go +++ b/vendor/github.com/klauspost/compress/zstd/encoder_options.go @@ -129,7 +129,7 @@ func WithEncoderPadding(n int) EOption { } // No need to waste our time. if n == 1 { - o.pad = 0 + n = 0 } if n > 1<<30 { return fmt.Errorf("padding must be less than 1GB (1<<30 bytes) ") diff --git a/vendor/github.com/klauspost/compress/zstd/framedec.go b/vendor/github.com/klauspost/compress/zstd/framedec.go index cc0aa2274..53e160f7e 100644 --- a/vendor/github.com/klauspost/compress/zstd/framedec.go +++ b/vendor/github.com/klauspost/compress/zstd/framedec.go @@ -73,20 +73,20 @@ func (d *frameDec) reset(br byteBuffer) error { switch err { case io.EOF, io.ErrUnexpectedEOF: return io.EOF - default: - return err case nil: signature[0] = b[0] + default: + return err } // Read the rest, don't allow io.ErrUnexpectedEOF b, err = br.readSmall(3) switch err { case io.EOF: return io.EOF - default: - return err case nil: copy(signature[1:], b) + default: + return err } if string(signature[1:4]) != skippableFrameMagic || signature[0]&0xf0 != 0x50 { diff --git a/vendor/github.com/klauspost/compress/zstd/matchlen_amd64.go b/vendor/github.com/klauspost/compress/zstd/matchlen_amd64.go new file mode 100644 index 000000000..f41932b7a --- /dev/null +++ b/vendor/github.com/klauspost/compress/zstd/matchlen_amd64.go @@ -0,0 +1,16 @@ +//go:build amd64 && !appengine && !noasm && gc +// +build amd64,!appengine,!noasm,gc + +// Copyright 2019+ Klaus Post. All rights reserved. +// License information can be found in the LICENSE file. + +package zstd + +// matchLen returns how many bytes match in a and b +// +// It assumes that: +// +// len(a) <= len(b) and len(a) > 0 +// +//go:noescape +func matchLen(a []byte, b []byte) int diff --git a/vendor/github.com/klauspost/compress/zstd/matchlen_amd64.s b/vendor/github.com/klauspost/compress/zstd/matchlen_amd64.s new file mode 100644 index 000000000..9a7655c0f --- /dev/null +++ b/vendor/github.com/klauspost/compress/zstd/matchlen_amd64.s @@ -0,0 +1,68 @@ +// Copied from S2 implementation.
+ +//go:build !appengine && !noasm && gc && !noasm + +#include "textflag.h" + +// func matchLen(a []byte, b []byte) int +// Requires: BMI +TEXT ·matchLen(SB), NOSPLIT, $0-56 + MOVQ a_base+0(FP), AX + MOVQ b_base+24(FP), CX + MOVQ a_len+8(FP), DX + + // matchLen + XORL SI, SI + CMPL DX, $0x08 + JB matchlen_match4_standalone + +matchlen_loopback_standalone: + MOVQ (AX)(SI*1), BX + XORQ (CX)(SI*1), BX + TESTQ BX, BX + JZ matchlen_loop_standalone + +#ifdef GOAMD64_v3 + TZCNTQ BX, BX +#else + BSFQ BX, BX +#endif + SARQ $0x03, BX + LEAL (SI)(BX*1), SI + JMP gen_match_len_end + +matchlen_loop_standalone: + LEAL -8(DX), DX + LEAL 8(SI), SI + CMPL DX, $0x08 + JAE matchlen_loopback_standalone + +matchlen_match4_standalone: + CMPL DX, $0x04 + JB matchlen_match2_standalone + MOVL (AX)(SI*1), BX + CMPL (CX)(SI*1), BX + JNE matchlen_match2_standalone + LEAL -4(DX), DX + LEAL 4(SI), SI + +matchlen_match2_standalone: + CMPL DX, $0x02 + JB matchlen_match1_standalone + MOVW (AX)(SI*1), BX + CMPW (CX)(SI*1), BX + JNE matchlen_match1_standalone + LEAL -2(DX), DX + LEAL 2(SI), SI + +matchlen_match1_standalone: + CMPL DX, $0x01 + JB gen_match_len_end + MOVB (AX)(SI*1), BL + CMPB (CX)(SI*1), BL + JNE gen_match_len_end + INCL SI + +gen_match_len_end: + MOVQ SI, ret+48(FP) + RET diff --git a/vendor/github.com/klauspost/compress/zstd/matchlen_generic.go b/vendor/github.com/klauspost/compress/zstd/matchlen_generic.go new file mode 100644 index 000000000..57b9c31c0 --- /dev/null +++ b/vendor/github.com/klauspost/compress/zstd/matchlen_generic.go @@ -0,0 +1,33 @@ +//go:build !amd64 || appengine || !gc || noasm +// +build !amd64 appengine !gc noasm + +// Copyright 2019+ Klaus Post. All rights reserved. +// License information can be found in the LICENSE file. + +package zstd + +import ( + "encoding/binary" + "math/bits" +) + +// matchLen returns the maximum common prefix length of a and b. +// a must be the shortest of the two. +func matchLen(a, b []byte) (n int) { + for ; len(a) >= 8 && len(b) >= 8; a, b = a[8:], b[8:] { + diff := binary.LittleEndian.Uint64(a) ^ binary.LittleEndian.Uint64(b) + if diff != 0 { + return n + bits.TrailingZeros64(diff)>>3 + } + n += 8 + } + + for i := range a { + if a[i] != b[i] { + break + } + n++ + } + return n + +} diff --git a/vendor/github.com/klauspost/compress/zstd/zstd.go b/vendor/github.com/klauspost/compress/zstd/zstd.go index 89396673d..4be7cc736 100644 --- a/vendor/github.com/klauspost/compress/zstd/zstd.go +++ b/vendor/github.com/klauspost/compress/zstd/zstd.go @@ -9,7 +9,6 @@ import ( "errors" "log" "math" - "math/bits" ) // enable debug printing @@ -106,27 +105,6 @@ func printf(format string, a ...interface{}) { } } -// matchLen returns the maximum common prefix length of a and b. -// a must be the shortest of the two. 
-func matchLen(a, b []byte) (n int) { - for ; len(a) >= 8 && len(b) >= 8; a, b = a[8:], b[8:] { - diff := binary.LittleEndian.Uint64(a) ^ binary.LittleEndian.Uint64(b) - if diff != 0 { - return n + bits.TrailingZeros64(diff)>>3 - } - n += 8 - } - - for i := range a { - if a[i] != b[i] { - break - } - n++ - } - return n - -} - func load3232(b []byte, i int32) uint32 { return binary.LittleEndian.Uint32(b[:len(b):len(b)][i:]) } diff --git a/vendor/github.com/mistifyio/go-zfs/v3/utils.go b/vendor/github.com/mistifyio/go-zfs/v3/utils.go index 0c2cce7d9..b69942b53 100644 --- a/vendor/github.com/mistifyio/go-zfs/v3/utils.go +++ b/vendor/github.com/mistifyio/go-zfs/v3/utils.go @@ -37,13 +37,16 @@ func (c *command) Run(arg ...string) ([][]string, error) { cmd.Stderr = &stderr id := uuid.New().String() - joinedArgs := strings.Join(cmd.Args, " ") + joinedArgs := cmd.Path + if len(cmd.Args) > 1 { + joinedArgs = strings.Join(append([]string{cmd.Path}, cmd.Args[1:]...), " ") + } logger.Log([]string{"ID:" + id, "START", joinedArgs}) if err := cmd.Run(); err != nil { return nil, &Error{ Err: err, - Debug: strings.Join([]string{cmd.Path, joinedArgs[1:]}, " "), + Debug: joinedArgs, Stderr: stderr.String(), } } @@ -61,7 +64,7 @@ func (c *command) Run(arg ...string) ([][]string, error) { output := make([][]string, len(lines)) for i, l := range lines { - output[i] = strings.Fields(l) + output[i] = strings.Split(l, "\t") } return output, nil diff --git a/vendor/github.com/mistifyio/go-zfs/v3/utils_notsolaris.go b/vendor/github.com/mistifyio/go-zfs/v3/utils_notsolaris.go index ef1beac90..b1ce59656 100644 --- a/vendor/github.com/mistifyio/go-zfs/v3/utils_notsolaris.go +++ b/vendor/github.com/mistifyio/go-zfs/v3/utils_notsolaris.go @@ -15,5 +15,5 @@ var ( zpoolPropList = []string{"name", "health", "allocated", "size", "free", "readonly", "dedupratio", "fragmentation", "freeing", "leaked"} zpoolPropListOptions = strings.Join(zpoolPropList, ",") - zpoolArgs = []string{"get", "-p", zpoolPropListOptions} + zpoolArgs = []string{"get", "-Hp", zpoolPropListOptions} ) diff --git a/vendor/github.com/mistifyio/go-zfs/v3/utils_solaris.go b/vendor/github.com/mistifyio/go-zfs/v3/utils_solaris.go index c6bf6d87a..f19aebabb 100644 --- a/vendor/github.com/mistifyio/go-zfs/v3/utils_solaris.go +++ b/vendor/github.com/mistifyio/go-zfs/v3/utils_solaris.go @@ -15,5 +15,5 @@ var ( zpoolPropList = []string{"name", "health", "allocated", "size", "free", "readonly", "dedupratio"} zpoolPropListOptions = strings.Join(zpoolPropList, ",") - zpoolArgs = []string{"get", "-p", zpoolPropListOptions} + zpoolArgs = []string{"get", "-Hp", zpoolPropListOptions} ) diff --git a/vendor/github.com/mistifyio/go-zfs/v3/zpool.go b/vendor/github.com/mistifyio/go-zfs/v3/zpool.go index 2f7071305..a0bd6471a 100644 --- a/vendor/github.com/mistifyio/go-zfs/v3/zpool.go +++ b/vendor/github.com/mistifyio/go-zfs/v3/zpool.go @@ -49,9 +49,6 @@ func GetZpool(name string) (*Zpool, error) { return nil, err } - // there is no -H - out = out[1:] - z := &Zpool{Name: name} for _, line := range out { if err := z.parseLine(line); err != nil { diff --git a/vendor/github.com/moby/term/doc.go b/vendor/github.com/moby/term/doc.go new file mode 100644 index 000000000..c9bc03244 --- /dev/null +++ b/vendor/github.com/moby/term/doc.go @@ -0,0 +1,3 @@ +// Package term provides structures and helper functions to work with +// terminal (state, sizes). 
+package term diff --git a/vendor/github.com/moby/term/tc.go b/vendor/github.com/moby/term/tc.go deleted file mode 100644 index 8a5e09f58..000000000 --- a/vendor/github.com/moby/term/tc.go +++ /dev/null @@ -1,20 +0,0 @@ -//go:build !windows -// +build !windows - -package term - -import ( - "golang.org/x/sys/unix" -) - -func tcget(fd uintptr) (*Termios, error) { - p, err := unix.IoctlGetTermios(int(fd), getTermios) - if err != nil { - return nil, err - } - return p, nil -} - -func tcset(fd uintptr, p *Termios) error { - return unix.IoctlSetTermios(int(fd), setTermios, p) -} diff --git a/vendor/github.com/moby/term/term.go b/vendor/github.com/moby/term/term.go index 8c1fc1b5a..f9d8988ef 100644 --- a/vendor/github.com/moby/term/term.go +++ b/vendor/github.com/moby/term/term.go @@ -1,119 +1,85 @@ -//go:build !windows -// +build !windows - -// Package term provides structures and helper functions to work with -// terminal (state, sizes). package term -import ( - "errors" - "fmt" - "io" - "os" - "os/signal" - - "golang.org/x/sys/unix" -) +import "io" -// ErrInvalidState is returned if the state of the terminal is invalid. -var ErrInvalidState = errors.New("Invalid terminal state") - -// State represents the state of the terminal. -type State struct { - termios Termios -} +// State holds the platform-specific state / console mode for the terminal. +type State terminalState // Winsize represents the size of the terminal window. type Winsize struct { Height uint16 Width uint16 - x uint16 - y uint16 + + // Only used on Unix + x uint16 + y uint16 } // StdStreams returns the standard streams (stdin, stdout, stderr). +// +// On Windows, it attempts to turn on VT handling on all std handles if +// supported, or falls back to terminal emulation. On Unix, this returns +// the standard [os.Stdin], [os.Stdout] and [os.Stderr]. func StdStreams() (stdIn io.ReadCloser, stdOut, stdErr io.Writer) { - return os.Stdin, os.Stdout, os.Stderr + return stdStreams() } // GetFdInfo returns the file descriptor for an os.File and indicates whether the file represents a terminal. -func GetFdInfo(in interface{}) (uintptr, bool) { - var inFd uintptr - var isTerminalIn bool - if file, ok := in.(*os.File); ok { - inFd = file.Fd() - isTerminalIn = IsTerminal(inFd) - } - return inFd, isTerminalIn +func GetFdInfo(in interface{}) (fd uintptr, isTerminal bool) { + return getFdInfo(in) +} + +// GetWinsize returns the window size based on the specified file descriptor. +func GetWinsize(fd uintptr) (*Winsize, error) { + return getWinsize(fd) +} + +// SetWinsize tries to set the specified window size for the specified file +// descriptor. It is only implemented on Unix, and returns an error on Windows. +func SetWinsize(fd uintptr, ws *Winsize) error { + return setWinsize(fd, ws) } // IsTerminal returns true if the given file descriptor is a terminal. func IsTerminal(fd uintptr) bool { - _, err := tcget(fd) - return err == nil + return isTerminal(fd) } // RestoreTerminal restores the terminal connected to the given file descriptor // to a previous state. func RestoreTerminal(fd uintptr, state *State) error { - if state == nil { - return ErrInvalidState - } - return tcset(fd, &state.termios) + return restoreTerminal(fd, state) } // SaveState saves the state of the terminal connected to the given file descriptor. 
func SaveState(fd uintptr) (*State, error) { - termios, err := tcget(fd) - if err != nil { - return nil, err - } - return &State{termios: *termios}, nil + return saveState(fd) } // DisableEcho applies the specified state to the terminal connected to the file // descriptor, with echo disabled. func DisableEcho(fd uintptr, state *State) error { - newState := state.termios - newState.Lflag &^= unix.ECHO - - if err := tcset(fd, &newState); err != nil { - return err - } - handleInterrupt(fd, state) - return nil + return disableEcho(fd, state) } // SetRawTerminal puts the terminal connected to the given file descriptor into -// raw mode and returns the previous state. On UNIX, this puts both the input -// and output into raw mode. On Windows, it only puts the input into raw mode. -func SetRawTerminal(fd uintptr) (*State, error) { - oldState, err := MakeRaw(fd) - if err != nil { - return nil, err - } - handleInterrupt(fd, oldState) - return oldState, err +// raw mode and returns the previous state. On UNIX, this is the equivalent of +// [MakeRaw], and puts both the input and output into raw mode. On Windows, it +// only puts the input into raw mode. +func SetRawTerminal(fd uintptr) (previousState *State, err error) { + return setRawTerminal(fd) } // SetRawTerminalOutput puts the output of terminal connected to the given file // descriptor into raw mode. On UNIX, this does nothing and returns nil for the // state. On Windows, it disables LF -> CRLF translation. -func SetRawTerminalOutput(fd uintptr) (*State, error) { - return nil, nil +func SetRawTerminalOutput(fd uintptr) (previousState *State, err error) { + return setRawTerminalOutput(fd) } -func handleInterrupt(fd uintptr, state *State) { - sigchan := make(chan os.Signal, 1) - signal.Notify(sigchan, os.Interrupt) - go func() { - for range sigchan { - // quit cleanly and the new terminal item is on a new line - fmt.Println() - signal.Stop(sigchan) - close(sigchan) - RestoreTerminal(fd, state) - os.Exit(1) - } - }() +// MakeRaw puts the terminal (Windows Console) connected to the +// given file descriptor into raw mode and returns the previous state of +// the terminal so that it can be restored. +func MakeRaw(fd uintptr) (previousState *State, err error) { + return makeRaw(fd) } diff --git a/vendor/github.com/moby/term/term_unix.go b/vendor/github.com/moby/term/term_unix.go new file mode 100644 index 000000000..2ec7706a1 --- /dev/null +++ b/vendor/github.com/moby/term/term_unix.go @@ -0,0 +1,98 @@ +//go:build !windows +// +build !windows + +package term + +import ( + "errors" + "io" + "os" + + "golang.org/x/sys/unix" +) + +// ErrInvalidState is returned if the state of the terminal is invalid. +// +// Deprecated: ErrInvalidState is no longer used. +var ErrInvalidState = errors.New("Invalid terminal state") + +// terminalState holds the platform-specific state / console mode for the terminal. 
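For orientation, a hedged sketch of the caller-facing flow after this refactor (illustrative only; the public API above is unchanged, with platform specifics now behind the lowercase helpers):

package main

import (
	"os"

	"github.com/moby/term"
)

func main() {
	fd, isTerm := term.GetFdInfo(os.Stdin)
	if !isTerm {
		return
	}
	// Identical call sequence on Unix and Windows.
	state, err := term.SetRawTerminal(fd)
	if err != nil {
		return
	}
	defer term.RestoreTerminal(fd, state)
	// ... interact with the raw terminal ...
}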
+type terminalState struct { + termios unix.Termios +} + +func stdStreams() (stdIn io.ReadCloser, stdOut, stdErr io.Writer) { + return os.Stdin, os.Stdout, os.Stderr +} + +func getFdInfo(in interface{}) (uintptr, bool) { + var inFd uintptr + var isTerminalIn bool + if file, ok := in.(*os.File); ok { + inFd = file.Fd() + isTerminalIn = isTerminal(inFd) + } + return inFd, isTerminalIn +} + +func getWinsize(fd uintptr) (*Winsize, error) { + uws, err := unix.IoctlGetWinsize(int(fd), unix.TIOCGWINSZ) + ws := &Winsize{Height: uws.Row, Width: uws.Col, x: uws.Xpixel, y: uws.Ypixel} + return ws, err +} + +func setWinsize(fd uintptr, ws *Winsize) error { + return unix.IoctlSetWinsize(int(fd), unix.TIOCSWINSZ, &unix.Winsize{ + Row: ws.Height, + Col: ws.Width, + Xpixel: ws.x, + Ypixel: ws.y, + }) +} + +func isTerminal(fd uintptr) bool { + _, err := tcget(fd) + return err == nil +} + +func restoreTerminal(fd uintptr, state *State) error { + if state == nil { + return errors.New("invalid terminal state") + } + return tcset(fd, &state.termios) +} + +func saveState(fd uintptr) (*State, error) { + termios, err := tcget(fd) + if err != nil { + return nil, err + } + return &State{termios: *termios}, nil +} + +func disableEcho(fd uintptr, state *State) error { + newState := state.termios + newState.Lflag &^= unix.ECHO + + return tcset(fd, &newState) +} + +func setRawTerminal(fd uintptr) (*State, error) { + return makeRaw(fd) +} + +func setRawTerminalOutput(fd uintptr) (*State, error) { + return nil, nil +} + +func tcget(fd uintptr) (*unix.Termios, error) { + p, err := unix.IoctlGetTermios(int(fd), getTermios) + if err != nil { + return nil, err + } + return p, nil +} + +func tcset(fd uintptr, p *unix.Termios) error { + return unix.IoctlSetTermios(int(fd), setTermios, p) +} diff --git a/vendor/github.com/moby/term/term_windows.go b/vendor/github.com/moby/term/term_windows.go index 3cdc8edbd..81ccff042 100644 --- a/vendor/github.com/moby/term/term_windows.go +++ b/vendor/github.com/moby/term/term_windows.go @@ -1,6 +1,7 @@ package term import ( + "fmt" "io" "os" "os/signal" @@ -9,22 +10,15 @@ import ( "golang.org/x/sys/windows" ) -// State holds the console mode for the terminal. -type State struct { +// terminalState holds the platform-specific state / console mode for the terminal. +type terminalState struct { mode uint32 } -// Winsize is used for window size. -type Winsize struct { - Height uint16 - Width uint16 -} - // vtInputSupported is true if winterm.ENABLE_VIRTUAL_TERMINAL_INPUT is supported by the console var vtInputSupported bool -// StdStreams returns the standard streams (stdin, stdout, stderr). -func StdStreams() (stdIn io.ReadCloser, stdOut, stdErr io.Writer) { +func stdStreams() (stdIn io.ReadCloser, stdOut, stdErr io.Writer) { // Turn on VT handling on all std handles, if possible. This might // fail, in which case we will fall back to terminal emulation. var ( @@ -87,16 +81,14 @@ func StdStreams() (stdIn io.ReadCloser, stdOut, stdErr io.Writer) { stdErr = os.Stderr } - return + return stdIn, stdOut, stdErr } -// GetFdInfo returns the file descriptor for an os.File and indicates whether the file represents a terminal. -func GetFdInfo(in interface{}) (uintptr, bool) { +func getFdInfo(in interface{}) (uintptr, bool) { return windowsconsole.GetHandleInfo(in) } -// GetWinsize returns the window size based on the specified file descriptor. 
-func GetWinsize(fd uintptr) (*Winsize, error) { +func getWinsize(fd uintptr) (*Winsize, error) { var info windows.ConsoleScreenBufferInfo if err := windows.GetConsoleScreenBufferInfo(windows.Handle(fd), &info); err != nil { return nil, err @@ -110,21 +102,21 @@ func GetWinsize(fd uintptr) (*Winsize, error) { return winsize, nil } -// IsTerminal returns true if the given file descriptor is a terminal. -func IsTerminal(fd uintptr) bool { +func setWinsize(fd uintptr, ws *Winsize) error { + return fmt.Errorf("not implemented on Windows") +} + +func isTerminal(fd uintptr) bool { var mode uint32 err := windows.GetConsoleMode(windows.Handle(fd), &mode) return err == nil } -// RestoreTerminal restores the terminal connected to the given file descriptor -// to a previous state. -func RestoreTerminal(fd uintptr, state *State) error { +func restoreTerminal(fd uintptr, state *State) error { return windows.SetConsoleMode(windows.Handle(fd), state.mode) } -// SaveState saves the state of the terminal connected to the given file descriptor. -func SaveState(fd uintptr) (*State, error) { +func saveState(fd uintptr) (*State, error) { var mode uint32 if err := windows.GetConsoleMode(windows.Handle(fd), &mode); err != nil { @@ -134,9 +126,8 @@ func SaveState(fd uintptr) (*State, error) { return &State{mode: mode}, nil } -// DisableEcho disables echo for the terminal connected to the given file descriptor. -// -- See https://msdn.microsoft.com/en-us/library/windows/desktop/ms683462(v=vs.85).aspx -func DisableEcho(fd uintptr, state *State) error { +func disableEcho(fd uintptr, state *State) error { + // See https://msdn.microsoft.com/en-us/library/windows/desktop/ms683462(v=vs.85).aspx mode := state.mode mode &^= windows.ENABLE_ECHO_INPUT mode |= windows.ENABLE_PROCESSED_INPUT | windows.ENABLE_LINE_INPUT @@ -150,69 +141,27 @@ func DisableEcho(fd uintptr, state *State) error { return nil } -// SetRawTerminal puts the terminal connected to the given file descriptor into -// raw mode and returns the previous state. On UNIX, this puts both the input -// and output into raw mode. On Windows, it only puts the input into raw mode. -func SetRawTerminal(fd uintptr) (*State, error) { - state, err := MakeRaw(fd) +func setRawTerminal(fd uintptr) (*State, error) { + oldState, err := MakeRaw(fd) if err != nil { return nil, err } // Register an interrupt handler to catch and restore prior state - restoreAtInterrupt(fd, state) - return state, err + restoreAtInterrupt(fd, oldState) + return oldState, err } -// SetRawTerminalOutput puts the output of terminal connected to the given file -// descriptor into raw mode. On UNIX, this does nothing and returns nil for the -// state. On Windows, it disables LF -> CRLF translation. -func SetRawTerminalOutput(fd uintptr) (*State, error) { - state, err := SaveState(fd) +func setRawTerminalOutput(fd uintptr) (*State, error) { + oldState, err := saveState(fd) if err != nil { return nil, err } // Ignore failures, since winterm.DISABLE_NEWLINE_AUTO_RETURN might not be supported on this // version of Windows. - _ = windows.SetConsoleMode(windows.Handle(fd), state.mode|windows.DISABLE_NEWLINE_AUTO_RETURN) - return state, err -} - -// MakeRaw puts the terminal (Windows Console) connected to the given file descriptor into raw -// mode and returns the previous state of the terminal so that it can be restored. 
-func MakeRaw(fd uintptr) (*State, error) { - state, err := SaveState(fd) - if err != nil { - return nil, err - } - - mode := state.mode - - // See - // -- https://msdn.microsoft.com/en-us/library/windows/desktop/ms686033(v=vs.85).aspx - // -- https://msdn.microsoft.com/en-us/library/windows/desktop/ms683462(v=vs.85).aspx - - // Disable these modes - mode &^= windows.ENABLE_ECHO_INPUT - mode &^= windows.ENABLE_LINE_INPUT - mode &^= windows.ENABLE_MOUSE_INPUT - mode &^= windows.ENABLE_WINDOW_INPUT - mode &^= windows.ENABLE_PROCESSED_INPUT - - // Enable these modes - mode |= windows.ENABLE_EXTENDED_FLAGS - mode |= windows.ENABLE_INSERT_MODE - mode |= windows.ENABLE_QUICK_EDIT_MODE - if vtInputSupported { - mode |= windows.ENABLE_VIRTUAL_TERMINAL_INPUT - } - - err = windows.SetConsoleMode(windows.Handle(fd), mode) - if err != nil { - return nil, err - } - return state, nil + _ = windows.SetConsoleMode(windows.Handle(fd), oldState.mode|windows.DISABLE_NEWLINE_AUTO_RETURN) + return oldState, err } func restoreAtInterrupt(fd uintptr, state *State) { diff --git a/vendor/github.com/moby/term/termios.go b/vendor/github.com/moby/term/termios_unix.go similarity index 50% rename from vendor/github.com/moby/term/termios.go rename to vendor/github.com/moby/term/termios_unix.go index 99c0f7de6..60c823783 100644 --- a/vendor/github.com/moby/term/termios.go +++ b/vendor/github.com/moby/term/termios_unix.go @@ -8,12 +8,11 @@ import ( ) // Termios is the Unix API for terminal I/O. +// +// Deprecated: use [unix.Termios]. type Termios = unix.Termios -// MakeRaw puts the terminal connected to the given file descriptor into raw -// mode and returns the previous state of the terminal so that it can be -// restored. -func MakeRaw(fd uintptr) (*State, error) { +func makeRaw(fd uintptr) (*State, error) { termios, err := tcget(fd) if err != nil { return nil, err @@ -21,10 +20,10 @@ func MakeRaw(fd uintptr) (*State, error) { oldState := State{termios: *termios} - termios.Iflag &^= (unix.IGNBRK | unix.BRKINT | unix.PARMRK | unix.ISTRIP | unix.INLCR | unix.IGNCR | unix.ICRNL | unix.IXON) + termios.Iflag &^= unix.IGNBRK | unix.BRKINT | unix.PARMRK | unix.ISTRIP | unix.INLCR | unix.IGNCR | unix.ICRNL | unix.IXON termios.Oflag &^= unix.OPOST - termios.Lflag &^= (unix.ECHO | unix.ECHONL | unix.ICANON | unix.ISIG | unix.IEXTEN) - termios.Cflag &^= (unix.CSIZE | unix.PARENB) + termios.Lflag &^= unix.ECHO | unix.ECHONL | unix.ICANON | unix.ISIG | unix.IEXTEN + termios.Cflag &^= unix.CSIZE | unix.PARENB termios.Cflag |= unix.CS8 termios.Cc[unix.VMIN] = 1 termios.Cc[unix.VTIME] = 0 diff --git a/vendor/github.com/moby/term/termios_windows.go b/vendor/github.com/moby/term/termios_windows.go new file mode 100644 index 000000000..5be4e7601 --- /dev/null +++ b/vendor/github.com/moby/term/termios_windows.go @@ -0,0 +1,37 @@ +package term + +import "golang.org/x/sys/windows" + +func makeRaw(fd uintptr) (*State, error) { + state, err := SaveState(fd) + if err != nil { + return nil, err + } + + mode := state.mode + + // See + // -- https://msdn.microsoft.com/en-us/library/windows/desktop/ms686033(v=vs.85).aspx + // -- https://msdn.microsoft.com/en-us/library/windows/desktop/ms683462(v=vs.85).aspx + + // Disable these modes + mode &^= windows.ENABLE_ECHO_INPUT + mode &^= windows.ENABLE_LINE_INPUT + mode &^= windows.ENABLE_MOUSE_INPUT + mode &^= windows.ENABLE_WINDOW_INPUT + mode &^= windows.ENABLE_PROCESSED_INPUT + + // Enable these modes + mode |= windows.ENABLE_EXTENDED_FLAGS + mode |= windows.ENABLE_INSERT_MODE + mode |= 
windows.ENABLE_QUICK_EDIT_MODE + if vtInputSupported { + mode |= windows.ENABLE_VIRTUAL_TERMINAL_INPUT + } + + err = windows.SetConsoleMode(windows.Handle(fd), mode) + if err != nil { + return nil, err + } + return state, nil +} diff --git a/vendor/github.com/moby/term/windows/ansi_reader.go b/vendor/github.com/moby/term/windows/ansi_reader.go index f32aa537e..fb34c547a 100644 --- a/vendor/github.com/moby/term/windows/ansi_reader.go +++ b/vendor/github.com/moby/term/windows/ansi_reader.go @@ -195,10 +195,10 @@ func keyToString(keyEvent *winterm.KEY_EVENT_RECORD, escapeSequence []byte) stri // +Key generates ESC N Key if !control && alt { - return ansiterm.KEY_ESC_N + strings.ToLower(string(keyEvent.UnicodeChar)) + return ansiterm.KEY_ESC_N + strings.ToLower(string(rune(keyEvent.UnicodeChar))) } - return string(keyEvent.UnicodeChar) + return string(rune(keyEvent.UnicodeChar)) } // formatVirtualKey converts a virtual key (e.g., up arrow) into the appropriate ANSI string. diff --git a/vendor/github.com/moby/term/windows/console.go b/vendor/github.com/moby/term/windows/console.go index 116b74e8f..21e57bd52 100644 --- a/vendor/github.com/moby/term/windows/console.go +++ b/vendor/github.com/moby/term/windows/console.go @@ -30,8 +30,11 @@ func GetHandleInfo(in interface{}) (uintptr, bool) { // IsConsole returns true if the given file descriptor is a Windows Console. // The code assumes that GetConsoleMode will return an error for file descriptors that are not a console. -// Deprecated: use golang.org/x/sys/windows.GetConsoleMode() or golang.org/x/term.IsTerminal() -var IsConsole = isConsole +// +// Deprecated: use [windows.GetConsoleMode] or [golang.org/x/term.IsTerminal]. +func IsConsole(fd uintptr) bool { + return isConsole(fd) +} func isConsole(fd uintptr) bool { var mode uint32 diff --git a/vendor/github.com/moby/term/winsize.go b/vendor/github.com/moby/term/winsize.go deleted file mode 100644 index bea8d4595..000000000 --- a/vendor/github.com/moby/term/winsize.go +++ /dev/null @@ -1,21 +0,0 @@ -//go:build !windows -// +build !windows - -package term - -import ( - "golang.org/x/sys/unix" -) - -// GetWinsize returns the window size based on the specified file descriptor. -func GetWinsize(fd uintptr) (*Winsize, error) { - uws, err := unix.IoctlGetWinsize(int(fd), unix.TIOCGWINSZ) - ws := &Winsize{Height: uws.Row, Width: uws.Col, x: uws.Xpixel, y: uws.Ypixel} - return ws, err -} - -// SetWinsize tries to set the specified window size for the specified file descriptor. -func SetWinsize(fd uintptr, ws *Winsize) error { - uws := &unix.Winsize{Row: ws.Height, Col: ws.Width, Xpixel: ws.x, Ypixel: ws.y} - return unix.IoctlSetWinsize(int(fd), unix.TIOCSWINSZ, uws) -} diff --git a/vendor/github.com/onsi/ginkgo/v2/.gitignore b/vendor/github.com/onsi/ginkgo/v2/.gitignore index edf0231cd..18793c248 100644 --- a/vendor/github.com/onsi/ginkgo/v2/.gitignore +++ b/vendor/github.com/onsi/ginkgo/v2/.gitignore @@ -1,5 +1,5 @@ .DS_Store -TODO.md +TODO tmp/**/* *.coverprofile .vscode diff --git a/vendor/github.com/onsi/ginkgo/v2/CHANGELOG.md b/vendor/github.com/onsi/ginkgo/v2/CHANGELOG.md index 5e779fe64..cb72bd6f2 100644 --- a/vendor/github.com/onsi/ginkgo/v2/CHANGELOG.md +++ b/vendor/github.com/onsi/ginkgo/v2/CHANGELOG.md @@ -1,3 +1,86 @@ +## 2.11.0 + +In prior versions of Ginkgo specs the CLI filter flags (e.g. `--focus`, `--label-filter`) would _override_ any programmatic focus. 
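A sketch of the new semantics (hypothetical spec; Label and --label-filter are existing Ginkgo features): running `ginkgo --label-filter=fast` against the spec below now runs only "focused and fast", because programmatic focus and the CLI filter are ANDed, and the run still exits non-zero because focused specs are present.

package demo_test

import (
	. "github.com/onsi/ginkgo/v2"
)

var _ = Describe("combined filtering", func() {
	FIt("focused and fast", Label("fast"), func() {})    // runs
	FIt("focused but slow", Label("slow"), func() {})    // skipped by the CLI label filter
	It("fast but not focused", Label("fast"), func() {}) // skipped by programmatic focus
})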
This behavior has proved surprising and confusing in at least the following ways:
+
+- users cannot combine programmatic filters and CLI filters to more efficiently select subsets of tests
+- CLI filters can override programmatic focus on CI systems resulting in an exit code of 0 despite the presence of (incorrectly!) committed focused specs.
+
+Going forward Ginkgo will AND all programmatic and CLI filters. Moreover, the presence of any programmatic focused tests will always result in a non-zero exit code.
+
+This change is technically a change in Ginkgo's external contract and may require some users to make changes to successfully adopt. Specifically: it's possible some users were intentionally using CLI filters to override programmatic focus. If this is you please open an issue so we can explore solutions to the underlying problem you are trying to solve.
+
+### Fixes
+- Programmatic focus is no longer overwritten by CLI filters [d6bba86]
+
+### Maintenance
+- Bump github.com/onsi/gomega from 1.27.7 to 1.27.8 (#1218) [4a70a38]
+- Bump golang.org/x/sys from 0.8.0 to 0.9.0 (#1219) [97eda4d]
+
+## 2.10.0
+
+### Features
+- feat(ginkgo/generators): add --tags flag (#1216) [a782a77]
+  adds a new --tags flag to ginkgo generate
+
+### Fixes
+- Fix broken link of MIGRATING_TO_V2.md (#1217) [548d78e]
+
+### Maintenance
+- Bump golang.org/x/tools from 0.9.1 to 0.9.3 (#1215) [2b76a5e]
+
+## 2.9.7
+
+### Fixes
+- fix race when multiple defercleanups are called in goroutines [07fc3a0]
+
+## 2.9.6
+
+### Fixes
+- fix: create parent directory before report files (#1212) [0ac65de]
+
+### Maintenance
+- Bump github.com/onsi/gomega from 1.27.6 to 1.27.7 (#1202) [3e39231]
+
+## 2.9.5
+
+### Fixes
+- ensure the correct deterministic sort order is produced when ordered specs are generated by a helper function [7fa0b6b]
+
+### Maintenance
+- fix generators link (#1200) [9f9d8b9]
+- Bump golang.org/x/tools from 0.8.0 to 0.9.1 (#1196) [150e3f2]
+- fix spelling err in docs (#1199) [0013b1a]
+- Bump golang.org/x/sys from 0.7.0 to 0.8.0 (#1193) [9e9e3e5]
+
+## 2.9.4
+
+### Fixes
+- fix hang with ginkgo -p (#1192) [15d4bdc] - this addresses a _long_ standing issue related to Ginkgo hanging when a child process spawned by the test does not exit.
+
+- fix: fail fast may cause Serial spec or cleanup Node interrupted (#1178) [8dea88b] - prior to this there was a small gap in which specs on other processes might start even if one process has tried to abort the suite.
+
+
+### Maintenance
+- Document run order when multiple setup nodes are at the same nesting level [903be81]
+
+## 2.9.3
+
+### Features
+- Add RenderTimeline to GinkgoT() [c0c77b6]
+
+### Fixes
+- update Measure deprecation message.
fixes #1176 [227c662] +- add newlines to GinkgoLogr (#1170) (#1171) [0de0e7c] + +### Maintenance +- Bump commonmarker from 0.23.8 to 0.23.9 in /docs (#1183) [8b925ab] +- Bump nokogiri from 1.14.1 to 1.14.3 in /docs (#1184) [e3795a4] +- Bump golang.org/x/tools from 0.7.0 to 0.8.0 (#1182) [b453793] +- Bump actions/setup-go from 3 to 4 (#1164) [73ed75b] +- Bump github.com/onsi/gomega from 1.27.4 to 1.27.6 (#1173) [0a2bc64] +- Bump github.com/go-logr/logr from 1.2.3 to 1.2.4 (#1174) [f41c557] +- Bump golang.org/x/sys from 0.6.0 to 0.7.0 (#1179) [8e423e5] + ## 2.9.2 ### Maintenance diff --git a/vendor/github.com/onsi/ginkgo/v2/ginkgo/generators/generate_command.go b/vendor/github.com/onsi/ginkgo/v2/ginkgo/generators/generate_command.go index 48d23f919..be01dec97 100644 --- a/vendor/github.com/onsi/ginkgo/v2/ginkgo/generators/generate_command.go +++ b/vendor/github.com/onsi/ginkgo/v2/ginkgo/generators/generate_command.go @@ -32,6 +32,9 @@ func BuildGenerateCommand() command.Command { {Name: "template-data", KeyPath: "CustomTemplateData", UsageArgument: "template-data-file", Usage: "If specified, generate will use the contents of the file passed as data to be rendered in the test file template"}, + {Name: "tags", KeyPath: "Tags", + UsageArgument: "build-tags", + Usage: "If specified, generate will create a test file that uses the given build tags (i.e. `--tags e2e,!unit` will add `//go:build e2e,!unit`)"}, }, &conf, types.GinkgoFlagSections{}, @@ -59,6 +62,7 @@ You can also pass a of the form "file.go" and generate will emit "fil } type specData struct { + BuildTags string Package string Subject string PackageImportPath string @@ -93,6 +97,7 @@ func generateTestFileForSubject(subject string, conf GeneratorsConfig) { } data := specData{ + BuildTags: getBuildTags(conf.Tags), Package: determinePackageName(packageName, conf.Internal), Subject: formattedName, PackageImportPath: getPackageImportPath(), diff --git a/vendor/github.com/onsi/ginkgo/v2/ginkgo/generators/generate_templates.go b/vendor/github.com/onsi/ginkgo/v2/ginkgo/generators/generate_templates.go index c3470adbf..4dab07d03 100644 --- a/vendor/github.com/onsi/ginkgo/v2/ginkgo/generators/generate_templates.go +++ b/vendor/github.com/onsi/ginkgo/v2/ginkgo/generators/generate_templates.go @@ -1,6 +1,7 @@ package generators -var specText = `package {{.Package}} +var specText = `{{.BuildTags}} +package {{.Package}} import ( {{.GinkgoImport}} @@ -14,7 +15,8 @@ var _ = {{.GinkgoPackage}}Describe("{{.Subject}}", func() { }) ` -var agoutiSpecText = `package {{.Package}} +var agoutiSpecText = `{{.BuildTags}} +package {{.Package}} import ( {{.GinkgoImport}} diff --git a/vendor/github.com/onsi/ginkgo/v2/ginkgo/generators/generators_common.go b/vendor/github.com/onsi/ginkgo/v2/ginkgo/generators/generators_common.go index 3046a4487..28c7aa6f4 100644 --- a/vendor/github.com/onsi/ginkgo/v2/ginkgo/generators/generators_common.go +++ b/vendor/github.com/onsi/ginkgo/v2/ginkgo/generators/generators_common.go @@ -1,6 +1,7 @@ package generators import ( + "fmt" "go/build" "os" "path/filepath" @@ -14,6 +15,7 @@ type GeneratorsConfig struct { Agouti, NoDot, Internal bool CustomTemplate string CustomTemplateData string + Tags string } func getPackageAndFormattedName() (string, string, string) { @@ -62,3 +64,13 @@ func determinePackageName(name string, internal bool) string { return name + "_test" } + +// getBuildTags returns the resultant string to be added. 
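+// For example, getBuildTags("e2e,!unit") yields "//go:build e2e,!unit\n".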
+// If the input string is not empty, then returns a `//go:build {}` string, +// otherwise returns an empty string. +func getBuildTags(tags string) string { + if tags != "" { + return fmt.Sprintf("//go:build %s\n", tags) + } + return "" +} diff --git a/vendor/github.com/onsi/ginkgo/v2/ginkgo_t_dsl.go b/vendor/github.com/onsi/ginkgo/v2/ginkgo_t_dsl.go index 17073145f..28447ffdd 100644 --- a/vendor/github.com/onsi/ginkgo/v2/ginkgo_t_dsl.go +++ b/vendor/github.com/onsi/ginkgo/v2/ginkgo_t_dsl.go @@ -80,6 +80,9 @@ type FullGinkgoTInterface interface { Fi(indentation uint, format string, args ...any) string Fiw(indentation uint, maxWidth uint, format string, args ...any) string + //Generates a formatted string version of the current spec's timeline + RenderTimeline() string + GinkgoRecover() DeferCleanup(args ...any) diff --git a/vendor/github.com/onsi/ginkgo/v2/internal/focus.go b/vendor/github.com/onsi/ginkgo/v2/internal/focus.go index 966ea0c1a..e3da7d14d 100644 --- a/vendor/github.com/onsi/ginkgo/v2/internal/focus.go +++ b/vendor/github.com/onsi/ginkgo/v2/internal/focus.go @@ -8,22 +8,22 @@ import ( ) /* - If a container marked as focus has a descendant that is also marked as focus, Ginkgo's policy is to - unmark the container's focus. This gives developers a more intuitive experience when debugging specs. - It is common to focus a container to just run a subset of specs, then identify the specific specs within the container to focus - - this policy allows the developer to simply focus those specific specs and not need to go back and turn the focus off of the container: - - As a common example, consider: - - FDescribe("something to debug", function() { - It("works", function() {...}) - It("works", function() {...}) - FIt("doesn't work", function() {...}) - It("works", function() {...}) - }) - - here the developer's intent is to focus in on the `"doesn't work"` spec and not to run the adjacent specs in the focused `"something to debug"` container. - The nested policy applied by this function enables this behavior. +If a container marked as focus has a descendant that is also marked as focus, Ginkgo's policy is to +unmark the container's focus. This gives developers a more intuitive experience when debugging specs. +It is common to focus a container to just run a subset of specs, then identify the specific specs within the container to focus - +this policy allows the developer to simply focus those specific specs and not need to go back and turn the focus off of the container: + +As a common example, consider: + + FDescribe("something to debug", function() { + It("works", function() {...}) + It("works", function() {...}) + FIt("doesn't work", function() {...}) + It("works", function() {...}) + }) + +here the developer's intent is to focus in on the `"doesn't work"` spec and not to run the adjacent specs in the focused `"something to debug"` container. +The nested policy applied by this function enables this behavior. */ func ApplyNestedFocusPolicyToTree(tree *TreeNode) { var walkTree func(tree *TreeNode) bool @@ -44,46 +44,43 @@ func ApplyNestedFocusPolicyToTree(tree *TreeNode) { } /* - Ginkgo supports focussing specs using `FIt`, `FDescribe`, etc. - this is called "programmatic focus" - It also supports focussing specs using regular expressions on the command line (`-focus=`, `-skip=`) that match against spec text - and file filters (`-focus-files=`, `-skip-files=`) that match against code locations for nodes in specs. +Ginkgo supports focussing specs using `FIt`, `FDescribe`, etc. 
- this is called "programmatic focus" +It also supports focussing specs using regular expressions on the command line (`-focus=`, `-skip=`) that match against spec text and file filters (`-focus-files=`, `-skip-files=`) that match against code locations for nodes in specs. - If any of the CLI flags are provided they take precedence. The file filters run first followed by the regex filters. +When both programmatic and file filters are provided their results are ANDed together. If multiple kinds of filters are provided, the file filters run first followed by the regex filters. - This function sets the `Skip` property on specs by applying Ginkgo's focus policy: - - If there are no CLI arguments and no programmatic focus, do nothing. - - If there are no CLI arguments but a spec somewhere has programmatic focus, skip any specs that have no programmatic focus. - - If there are CLI arguments parse them and skip any specs that either don't match the focus filters or do match the skip filters. +This function sets the `Skip` property on specs by applying Ginkgo's focus policy: +- If there are no CLI arguments and no programmatic focus, do nothing. +- If a spec somewhere has programmatic focus skip any specs that have no programmatic focus. +- If there are CLI arguments parse them and skip any specs that either don't match the focus filters or do match the skip filters. - *Note:* specs with pending nodes are Skipped when created by NewSpec. +*Note:* specs with pending nodes are Skipped when created by NewSpec. */ func ApplyFocusToSpecs(specs Specs, description string, suiteLabels Labels, suiteConfig types.SuiteConfig) (Specs, bool) { focusString := strings.Join(suiteConfig.FocusStrings, "|") skipString := strings.Join(suiteConfig.SkipStrings, "|") - hasFocusCLIFlags := focusString != "" || skipString != "" || len(suiteConfig.SkipFiles) > 0 || len(suiteConfig.FocusFiles) > 0 || suiteConfig.LabelFilter != "" - type SkipCheck func(spec Spec) bool // by default, skip any specs marked pending skipChecks := []SkipCheck{func(spec Spec) bool { return spec.Nodes.HasNodeMarkedPending() }} hasProgrammaticFocus := false - if !hasFocusCLIFlags { - // check for programmatic focus - for _, spec := range specs { - if spec.Nodes.HasNodeMarkedFocus() && !spec.Nodes.HasNodeMarkedPending() { - skipChecks = append(skipChecks, func(spec Spec) bool { return !spec.Nodes.HasNodeMarkedFocus() }) - hasProgrammaticFocus = true - break - } + for _, spec := range specs { + if spec.Nodes.HasNodeMarkedFocus() && !spec.Nodes.HasNodeMarkedPending() { + hasProgrammaticFocus = true + break } } + if hasProgrammaticFocus { + skipChecks = append(skipChecks, func(spec Spec) bool { return !spec.Nodes.HasNodeMarkedFocus() }) + } + if suiteConfig.LabelFilter != "" { labelFilter, _ := types.ParseLabelFilter(suiteConfig.LabelFilter) - skipChecks = append(skipChecks, func(spec Spec) bool { - return !labelFilter(UnionOfLabels(suiteLabels, spec.Nodes.UnionOfLabels())) + skipChecks = append(skipChecks, func(spec Spec) bool { + return !labelFilter(UnionOfLabels(suiteLabels, spec.Nodes.UnionOfLabels())) }) } diff --git a/vendor/github.com/onsi/ginkgo/v2/internal/interrupt_handler/interrupt_handler.go b/vendor/github.com/onsi/ginkgo/v2/internal/interrupt_handler/interrupt_handler.go index ac6f51040..8ed86111f 100644 --- a/vendor/github.com/onsi/ginkgo/v2/internal/interrupt_handler/interrupt_handler.go +++ b/vendor/github.com/onsi/ginkgo/v2/internal/interrupt_handler/interrupt_handler.go @@ -10,7 +10,7 @@ import ( 
"github.com/onsi/ginkgo/v2/internal/parallel_support" ) -const ABORT_POLLING_INTERVAL = 500 * time.Millisecond +var ABORT_POLLING_INTERVAL = 500 * time.Millisecond type InterruptCause uint @@ -62,13 +62,14 @@ type InterruptHandlerInterface interface { } type InterruptHandler struct { - c chan interface{} - lock *sync.Mutex - level InterruptLevel - cause InterruptCause - client parallel_support.Client - stop chan interface{} - signals []os.Signal + c chan interface{} + lock *sync.Mutex + level InterruptLevel + cause InterruptCause + client parallel_support.Client + stop chan interface{} + signals []os.Signal + requestAbortCheck chan interface{} } func NewInterruptHandler(client parallel_support.Client, signals ...os.Signal) *InterruptHandler { @@ -76,11 +77,12 @@ func NewInterruptHandler(client parallel_support.Client, signals ...os.Signal) * signals = []os.Signal{os.Interrupt, syscall.SIGTERM} } handler := &InterruptHandler{ - c: make(chan interface{}), - lock: &sync.Mutex{}, - stop: make(chan interface{}), - client: client, - signals: signals, + c: make(chan interface{}), + lock: &sync.Mutex{}, + stop: make(chan interface{}), + requestAbortCheck: make(chan interface{}), + client: client, + signals: signals, } handler.registerForInterrupts() return handler @@ -109,6 +111,12 @@ func (handler *InterruptHandler) registerForInterrupts() { pollTicker.Stop() return } + case <-handler.requestAbortCheck: + if handler.client.ShouldAbort() { + close(abortChannel) + pollTicker.Stop() + return + } case <-handler.stop: pollTicker.Stop() return @@ -152,11 +160,18 @@ func (handler *InterruptHandler) registerForInterrupts() { func (handler *InterruptHandler) Status() InterruptStatus { handler.lock.Lock() - defer handler.lock.Unlock() - - return InterruptStatus{ + status := InterruptStatus{ Level: handler.level, Channel: handler.c, Cause: handler.cause, } + handler.lock.Unlock() + + if handler.client != nil && handler.client.ShouldAbort() && !status.Interrupted() { + close(handler.requestAbortCheck) + <-status.Channel + return handler.Status() + } + + return status } diff --git a/vendor/github.com/onsi/ginkgo/v2/internal/node.go b/vendor/github.com/onsi/ginkgo/v2/internal/node.go index 0869bffb3..14c7cf54e 100644 --- a/vendor/github.com/onsi/ginkgo/v2/internal/node.go +++ b/vendor/github.com/onsi/ginkgo/v2/internal/node.go @@ -875,6 +875,15 @@ func (n Nodes) FirstNodeMarkedOrdered() Node { return Node{} } +func (n Nodes) IndexOfFirstNodeMarkedOrdered() int { + for i := range n { + if n[i].MarkedOrdered { + return i + } + } + return -1 +} + func (n Nodes) GetMaxFlakeAttempts() int { maxFlakeAttempts := 0 for i := range n { diff --git a/vendor/github.com/onsi/ginkgo/v2/internal/ordering.go b/vendor/github.com/onsi/ginkgo/v2/internal/ordering.go index 7ed43c7fd..84eea0a59 100644 --- a/vendor/github.com/onsi/ginkgo/v2/internal/ordering.go +++ b/vendor/github.com/onsi/ginkgo/v2/internal/ordering.go @@ -27,36 +27,43 @@ func (s *SortableSpecs) Swap(i, j int) { s.Indexes[i], s.Indexes[j] = s.Indexes[ func (s *SortableSpecs) Less(i, j int) bool { a, b := s.Specs[s.Indexes[i]], s.Specs[s.Indexes[j]] - firstOrderedA := a.Nodes.FirstNodeMarkedOrdered() - firstOrderedB := b.Nodes.FirstNodeMarkedOrdered() - if firstOrderedA.ID == firstOrderedB.ID && !firstOrderedA.IsZero() { - // strictly preserve order in ordered containers. 
ID will track this as IDs are generated monotonically
-		return a.FirstNodeWithType(types.NodeTypeIt).ID < b.FirstNodeWithType(types.NodeTypeIt).ID
+	aNodes, bNodes := a.Nodes.WithType(types.NodeTypesForContainerAndIt), b.Nodes.WithType(types.NodeTypesForContainerAndIt)
+
+	firstOrderedAIdx, firstOrderedBIdx := aNodes.IndexOfFirstNodeMarkedOrdered(), bNodes.IndexOfFirstNodeMarkedOrdered()
+	if firstOrderedAIdx > -1 && firstOrderedBIdx > -1 && aNodes[firstOrderedAIdx].ID == bNodes[firstOrderedBIdx].ID {
+		// strictly preserve order within an ordered container. ID will track this as IDs are generated monotonically
+		return aNodes.FirstNodeWithType(types.NodeTypeIt).ID < bNodes.FirstNodeWithType(types.NodeTypeIt).ID
+	}
+
+	// if either spec is in an ordered container - only use the nodes up to the outermost ordered container
+	if firstOrderedAIdx > -1 {
+		aNodes = aNodes[:firstOrderedAIdx+1]
+	}
+	if firstOrderedBIdx > -1 {
+		bNodes = bNodes[:firstOrderedBIdx+1]
+	}

-	aCLs := a.Nodes.WithType(types.NodeTypesForContainerAndIt).CodeLocations()
-	bCLs := b.Nodes.WithType(types.NodeTypesForContainerAndIt).CodeLocations()
-	for i := 0; i < len(aCLs) && i < len(bCLs); i++ {
-		aCL, bCL := aCLs[i], bCLs[i]
-		if aCL.FileName < bCL.FileName {
-			return true
-		} else if aCL.FileName > bCL.FileName {
-			return false
+	for i := 0; i < len(aNodes) && i < len(bNodes); i++ {
+		aCL, bCL := aNodes[i].CodeLocation, bNodes[i].CodeLocation
+		if aCL.FileName != bCL.FileName {
+			return aCL.FileName < bCL.FileName
		}
-		if aCL.LineNumber < bCL.LineNumber {
-			return true
-		} else if aCL.LineNumber > bCL.LineNumber {
-			return false
+		if aCL.LineNumber != bCL.LineNumber {
+			return aCL.LineNumber < bCL.LineNumber
		}
	}
	// either everything is equal or we have different lengths of CLs
-	if len(aCLs) < len(bCLs) {
-		return true
-	} else if len(aCLs) > len(bCLs) {
-		return false
+	if len(aNodes) != len(bNodes) {
+		return len(aNodes) < len(bNodes)
	}
	// ok, now we are sure everything was equal. so we use the spec text to break ties
-	return a.Text() < b.Text()
+	for i := 0; i < len(aNodes); i++ {
+		if aNodes[i].Text != bNodes[i].Text {
+			return aNodes[i].Text < bNodes[i].Text
+		}
+	}
+	// ok, all those texts were equal. we'll use the ID of the most deeply nested node as a last resort
+	return aNodes[len(aNodes)-1].ID < bNodes[len(bNodes)-1].ID
}

type GroupedSpecIndices []SpecIndices
diff --git a/vendor/github.com/onsi/ginkgo/v2/internal/output_interceptor_unix.go b/vendor/github.com/onsi/ginkgo/v2/internal/output_interceptor_unix.go
index f5ae15b8b..8a237f446 100644
--- a/vendor/github.com/onsi/ginkgo/v2/internal/output_interceptor_unix.go
+++ b/vendor/github.com/onsi/ginkgo/v2/internal/output_interceptor_unix.go
@@ -26,6 +26,17 @@ func (impl *dupSyscallOutputInterceptorImpl) CreateStdoutStderrClones() (*os.Fil
	stdoutCloneFD, _ := unix.Dup(1)
	stderrCloneFD, _ := unix.Dup(2)

+	// Important: set the fds to FD_CLOEXEC to prevent them from leaking into child processes
+	// https://github.com/onsi/ginkgo/issues/1191
+	flags, err := unix.FcntlInt(uintptr(stdoutCloneFD), unix.F_GETFD, 0)
+	if err == nil {
+		unix.FcntlInt(uintptr(stdoutCloneFD), unix.F_SETFD, flags|unix.FD_CLOEXEC)
+	}
+	flags, err = unix.FcntlInt(uintptr(stderrCloneFD), unix.F_GETFD, 0)
+	if err == nil {
+		unix.FcntlInt(uintptr(stderrCloneFD), unix.F_SETFD, flags|unix.FD_CLOEXEC)
+	}
+
	// And then wrap the clone file descriptors in files.
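The FD_CLOEXEC pattern above, restated as a self-contained sketch (illustrative only; markCloseOnExec is a hypothetical helper, not part of Ginkgo):

package main

import "golang.org/x/sys/unix"

// markCloseOnExec sets FD_CLOEXEC on fd so it is closed automatically when a
// child process is exec'd, instead of leaking into it.
func markCloseOnExec(fd int) error {
	flags, err := unix.FcntlInt(uintptr(fd), unix.F_GETFD, 0)
	if err != nil {
		return err
	}
	_, err = unix.FcntlInt(uintptr(fd), unix.F_SETFD, flags|unix.FD_CLOEXEC)
	return err
}

func main() {
	// Clone stdout the way the interceptor does, then protect the clone.
	cloneFD, err := unix.Dup(1)
	if err != nil {
		panic(err)
	}
	if err := markCloseOnExec(cloneFD); err != nil {
		panic(err)
	}
	_ = unix.Close(cloneFD)
}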
// One benefit of this (that we don't use yet) is that we can actually write // to these files to emit output to the console even though we're intercepting output diff --git a/vendor/github.com/onsi/ginkgo/v2/internal/suite.go b/vendor/github.com/onsi/ginkgo/v2/internal/suite.go index a1dbd4c62..ea0d259d9 100644 --- a/vendor/github.com/onsi/ginkgo/v2/internal/suite.go +++ b/vendor/github.com/onsi/ginkgo/v2/internal/suite.go @@ -245,7 +245,9 @@ func (suite *Suite) pushCleanupNode(node Node) error { node.NodeIDWhereCleanupWasGenerated = suite.currentNode.ID node.NestingLevel = suite.currentNode.NestingLevel + suite.selectiveLock.Lock() suite.cleanupNodes = append(suite.cleanupNodes, node) + suite.selectiveLock.Unlock() return nil } @@ -937,6 +939,12 @@ func (suite *Suite) runNode(node Node, specDeadline time.Time, text string) (typ gracePeriodChannel = time.After(gracePeriod) case <-interruptStatus.Channel: interruptStatus = suite.interruptHandler.Status() + // ignore interruption from other process if we are cleaning up or reporting + if interruptStatus.Cause == interrupt_handler.InterruptCauseAbortByOtherProcess && + node.NodeType.Is(types.NodeTypesAllowedDuringReportInterrupt|types.NodeTypesAllowedDuringCleanupInterrupt) { + continue + } + deadlineChannel = nil // don't worry about deadlines, time's up now failureTimelineLocation := suite.generateTimelineLocation() diff --git a/vendor/github.com/onsi/ginkgo/v2/internal/testingtproxy/testing_t_proxy.go b/vendor/github.com/onsi/ginkgo/v2/internal/testingtproxy/testing_t_proxy.go index 92acc0a00..73e265565 100644 --- a/vendor/github.com/onsi/ginkgo/v2/internal/testingtproxy/testing_t_proxy.go +++ b/vendor/github.com/onsi/ginkgo/v2/internal/testingtproxy/testing_t_proxy.go @@ -7,6 +7,7 @@ import ( "github.com/onsi/ginkgo/v2/formatter" "github.com/onsi/ginkgo/v2/internal" + "github.com/onsi/ginkgo/v2/reporters" "github.com/onsi/ginkgo/v2/types" ) @@ -185,6 +186,9 @@ func (t *ginkgoTestingTProxy) Fi(indentation uint, format string, args ...any) s func (t *ginkgoTestingTProxy) Fiw(indentation uint, maxWidth uint, format string, args ...any) string { return t.f.Fiw(indentation, maxWidth, format, args...) 
}
+func (t *ginkgoTestingTProxy) RenderTimeline() string {
+	return reporters.RenderTimeline(t.report(), false)
+}
func (t *ginkgoTestingTProxy) GinkgoRecover() {
	t.ginkgoRecover()
}
diff --git a/vendor/github.com/onsi/ginkgo/v2/internal/writer.go b/vendor/github.com/onsi/ginkgo/v2/internal/writer.go
index 28a45b0fa..574f172df 100644
--- a/vendor/github.com/onsi/ginkgo/v2/internal/writer.go
+++ b/vendor/github.com/onsi/ginkgo/v2/internal/writer.go
@@ -135,6 +135,6 @@ func (w *Writer) Println(a ...interface{}) {
func GinkgoLogrFunc(writer *Writer) logr.Logger {
	return funcr.New(func(prefix, args string) {
-		writer.Printf("%s", args)
+		writer.Printf("%s\n", args)
	}, funcr.Options{})
}
diff --git a/vendor/github.com/onsi/ginkgo/v2/reporters/json_report.go b/vendor/github.com/onsi/ginkgo/v2/reporters/json_report.go
index 7f96c450f..be506f9b4 100644
--- a/vendor/github.com/onsi/ginkgo/v2/reporters/json_report.go
+++ b/vendor/github.com/onsi/ginkgo/v2/reporters/json_report.go
@@ -4,12 +4,16 @@ import (
	"encoding/json"
	"fmt"
	"os"
+	"path"

	"github.com/onsi/ginkgo/v2/types"
)

-//GenerateJSONReport produces a JSON-formatted report at the passed in destination
+// GenerateJSONReport produces a JSON-formatted report at the passed-in destination
func GenerateJSONReport(report types.Report, destination string) error {
+	if err := os.MkdirAll(path.Dir(destination), 0770); err != nil {
+		return err
+	}
	f, err := os.Create(destination)
	if err != nil {
		return err
@@ -25,8 +29,8 @@ func GenerateJSONReport(report types.Report, destination string) error {
	return f.Close()
}

-//MergeJSONReports produces a single JSON-formatted report at the passed in destination by merging the JSON-formatted reports provided in sources
-//It skips over reports that fail to decode but reports on them via the returned messages []string
+// MergeAndCleanupJSONReports produces a single JSON-formatted report at the passed-in destination by merging the JSON-formatted reports provided in sources.
+// It skips over reports that fail to decode but reports on them via the returned messages []string
func MergeAndCleanupJSONReports(sources []string, destination string) ([]string, error) {
	messages := []string{}
	allReports := []types.Report{}
@@ -46,6 +50,9 @@ func MergeAndCleanupJSONReports(sources []string, destination string) ([]string,
		allReports = append(allReports, reports...)
	}

+	if err := os.MkdirAll(path.Dir(destination), 0770); err != nil {
+		return messages, err
+	}
	f, err := os.Create(destination)
	if err != nil {
		return messages, err
diff --git a/vendor/github.com/onsi/ginkgo/v2/reporters/junit_report.go b/vendor/github.com/onsi/ginkgo/v2/reporters/junit_report.go
index ca98609d0..816042208 100644
--- a/vendor/github.com/onsi/ginkgo/v2/reporters/junit_report.go
+++ b/vendor/github.com/onsi/ginkgo/v2/reporters/junit_report.go
@@ -14,6 +14,7 @@ import (
	"encoding/xml"
	"fmt"
	"os"
+	"path"
	"strings"

	"github.com/onsi/ginkgo/v2/config"
@@ -285,6 +286,9 @@ func GenerateJUnitReportWithConfig(report types.Report, dst string, config Junit
		TestSuites: []JUnitTestSuite{suite},
	}

+	if err := os.MkdirAll(path.Dir(dst), 0770); err != nil {
+		return err
+	}
	f, err := os.Create(dst)
	if err != nil {
		return err
@@ -322,6 +326,9 @@ func MergeAndCleanupJUnitReports(sources []string, dst string) ([]string, error)
		mergedReport.TestSuites = append(mergedReport.TestSuites, report.TestSuites...)
} + if err := os.MkdirAll(path.Dir(dst), 0770); err != nil { + return messages, err + } f, err := os.Create(dst) if err != nil { return messages, err @@ -344,8 +351,12 @@ func failureDescriptionForUnstructuredReporters(spec types.SpecReport) string { } func systemErrForUnstructuredReporters(spec types.SpecReport) string { + return RenderTimeline(spec, true) +} + +func RenderTimeline(spec types.SpecReport, noColor bool) string { out := &strings.Builder{} - NewDefaultReporter(types.ReporterConfig{NoColor: true, VeryVerbose: true}, out).emitTimeline(0, spec, spec.Timeline()) + NewDefaultReporter(types.ReporterConfig{NoColor: noColor, VeryVerbose: true}, out).emitTimeline(0, spec, spec.Timeline()) return out.String() } diff --git a/vendor/github.com/onsi/ginkgo/v2/reporters/teamcity_report.go b/vendor/github.com/onsi/ginkgo/v2/reporters/teamcity_report.go index c1863496d..e990ad82e 100644 --- a/vendor/github.com/onsi/ginkgo/v2/reporters/teamcity_report.go +++ b/vendor/github.com/onsi/ginkgo/v2/reporters/teamcity_report.go @@ -11,6 +11,7 @@ package reporters import ( "fmt" "os" + "path" "strings" "github.com/onsi/ginkgo/v2/types" @@ -27,6 +28,9 @@ func tcEscape(s string) string { } func GenerateTeamcityReport(report types.Report, dst string) error { + if err := os.MkdirAll(path.Dir(dst), 0770); err != nil { + return err + } f, err := os.Create(dst) if err != nil { return err diff --git a/vendor/github.com/onsi/ginkgo/v2/types/deprecation_support.go b/vendor/github.com/onsi/ginkgo/v2/types/deprecation_support.go index f267bdefd..e2519f673 100644 --- a/vendor/github.com/onsi/ginkgo/v2/types/deprecation_support.go +++ b/vendor/github.com/onsi/ginkgo/v2/types/deprecation_support.go @@ -38,7 +38,7 @@ func (d deprecations) Async() Deprecation { func (d deprecations) Measure() Deprecation { return Deprecation{ - Message: "Measure is deprecated and will be removed in Ginkgo V2. Please migrate to gomega/gmeasure.", + Message: "Measure is deprecated and has been removed from Ginkgo V2. Any Measure tests in your spec will not run. Please migrate to gomega/gmeasure.", DocLink: "removed-measure", Version: "1.16.3", } diff --git a/vendor/github.com/onsi/ginkgo/v2/types/version.go b/vendor/github.com/onsi/ginkgo/v2/types/version.go index 8e7f7404f..f895739b8 100644 --- a/vendor/github.com/onsi/ginkgo/v2/types/version.go +++ b/vendor/github.com/onsi/ginkgo/v2/types/version.go @@ -1,3 +1,3 @@ package types -const VERSION = "2.9.2" +const VERSION = "2.11.0" diff --git a/vendor/github.com/onsi/gomega/.gitignore b/vendor/github.com/onsi/gomega/.gitignore index 52266eae1..425d0a509 100644 --- a/vendor/github.com/onsi/gomega/.gitignore +++ b/vendor/github.com/onsi/gomega/.gitignore @@ -3,5 +3,5 @@ . 
.idea
gomega.iml
-TODO.md
+TODO
.vscode
\ No newline at end of file
diff --git a/vendor/github.com/onsi/gomega/CHANGELOG.md b/vendor/github.com/onsi/gomega/CHANGELOG.md
index ef428f6f6..9b83dd6d4 100644
--- a/vendor/github.com/onsi/gomega/CHANGELOG.md
+++ b/vendor/github.com/onsi/gomega/CHANGELOG.md
@@ -1,3 +1,27 @@
+## 1.27.8
+
+### Fixes
+- HaveExactElements should not call FailureMessage if a submatcher returned an error [096f392]
+
+### Maintenance
+- Bump github.com/onsi/ginkgo/v2 from 2.9.5 to 2.9.7 (#669) [8884bee]
+
+## 1.27.7
+
+### Fixes
+- fix: gcustom.MakeMatcher accepts nil as actual value (#666) [57054d5]
+
+### Maintenance
+- update gitignore [05c1bc6]
+- Bump github.com/onsi/ginkgo/v2 from 2.9.4 to 2.9.5 (#663) [7cadcf6]
+- Bump golang.org/x/net from 0.9.0 to 0.10.0 (#662) [b524839]
+- Bump github.com/onsi/ginkgo/v2 from 2.9.2 to 2.9.4 (#661) [5f44694]
+- Bump commonmarker from 0.23.8 to 0.23.9 in /docs (#657) [05dc99a]
+- Bump nokogiri from 1.14.1 to 1.14.3 in /docs (#658) [3a033d1]
+- Replace deprecated NewGomegaWithT with NewWithT (#659) [a19238f]
+- Bump golang.org/x/net from 0.8.0 to 0.9.0 (#656) [29ed041]
+- Bump actions/setup-go from 3 to 4 (#651) [11b2080]
+
 ## 1.27.6

 ### Fixes
diff --git a/vendor/github.com/onsi/gomega/gomega_dsl.go b/vendor/github.com/onsi/gomega/gomega_dsl.go
index 872592bfb..bc7ec293d 100644
--- a/vendor/github.com/onsi/gomega/gomega_dsl.go
+++ b/vendor/github.com/onsi/gomega/gomega_dsl.go
@@ -22,7 +22,7 @@ import (
	"github.com/onsi/gomega/types"
)

-const GOMEGA_VERSION = "1.27.6"
+const GOMEGA_VERSION = "1.27.8"

const nilGomegaPanic = `You are trying to make an assertion, but haven't registered Gomega's fail handler.
If you're using Ginkgo then you probably forgot to put your assertion in an It().
diff --git a/vendor/github.com/onsi/gomega/matchers/have_exact_elements.go b/vendor/github.com/onsi/gomega/matchers/have_exact_elements.go
index 7cce776c1..dca5b9446 100644
--- a/vendor/github.com/onsi/gomega/matchers/have_exact_elements.go
+++ b/vendor/github.com/onsi/gomega/matchers/have_exact_elements.go
@@ -44,7 +44,12 @@ func (matcher *HaveExactElementsMatcher) Match(actual interface{}) (success bool
		elemMatcher := matchers[i].(omegaMatcher)

		match, err := elemMatcher.Match(values[i])
-		if err != nil || !match {
+		if err != nil {
+			matcher.mismatchFailures = append(matcher.mismatchFailures, mismatchFailure{
+				index:   i,
+				failure: err.Error(),
+			})
+		} else if !match {
			matcher.mismatchFailures = append(matcher.mismatchFailures, mismatchFailure{
				index:   i,
				failure: elemMatcher.FailureMessage(values[i]),
diff --git a/vendor/github.com/opencontainers/image-spec/specs-go/v1/annotations.go b/vendor/github.com/opencontainers/image-spec/specs-go/v1/annotations.go
index 6f9e6fd3a..e62892046 100644
--- a/vendor/github.com/opencontainers/image-spec/specs-go/v1/annotations.go
+++ b/vendor/github.com/opencontainers/image-spec/specs-go/v1/annotations.go
@@ -65,7 +65,4 @@ const (
	// AnnotationArtifactDescription is the annotation key for the human readable description for the artifact.
	AnnotationArtifactDescription = "org.opencontainers.artifact.description"
-
-	// AnnotationReferrersFiltersApplied is the annotation key for the comma separated list of filters applied by the registry in the referrers listing.
- AnnotationReferrersFiltersApplied = "org.opencontainers.referrers.filtersApplied" ) diff --git a/vendor/github.com/opencontainers/image-spec/specs-go/v1/artifact.go b/vendor/github.com/opencontainers/image-spec/specs-go/v1/artifact.go deleted file mode 100644 index 03d76ce43..000000000 --- a/vendor/github.com/opencontainers/image-spec/specs-go/v1/artifact.go +++ /dev/null @@ -1,34 +0,0 @@ -// Copyright 2022 The Linux Foundation -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package v1 - -// Artifact describes an artifact manifest. -// This structure provides `application/vnd.oci.artifact.manifest.v1+json` mediatype when marshalled to JSON. -type Artifact struct { - // MediaType is the media type of the object this schema refers to. - MediaType string `json:"mediaType"` - - // ArtifactType is the IANA media type of the artifact this schema refers to. - ArtifactType string `json:"artifactType"` - - // Blobs is a collection of blobs referenced by this manifest. - Blobs []Descriptor `json:"blobs,omitempty"` - - // Subject (reference) is an optional link from the artifact to another manifest forming an association between the artifact and the other manifest. - Subject *Descriptor `json:"subject,omitempty"` - - // Annotations contains arbitrary metadata for the artifact manifest. - Annotations map[string]string `json:"annotations,omitempty"` -} diff --git a/vendor/github.com/opencontainers/image-spec/specs-go/v1/config.go b/vendor/github.com/opencontainers/image-spec/specs-go/v1/config.go index e6aa113f0..36b0aeb8f 100644 --- a/vendor/github.com/opencontainers/image-spec/specs-go/v1/config.go +++ b/vendor/github.com/opencontainers/image-spec/specs-go/v1/config.go @@ -49,13 +49,15 @@ type ImageConfig struct { // StopSignal contains the system call signal that will be sent to the container to exit. StopSignal string `json:"StopSignal,omitempty"` - // ArgsEscaped `[Deprecated]` - This field is present only for legacy - // compatibility with Docker and should not be used by new image builders. - // It is used by Docker for Windows images to indicate that the `Entrypoint` - // or `Cmd` or both, contains only a single element array, that is a - // pre-escaped, and combined into a single string `CommandLine`. If `true` - // the value in `Entrypoint` or `Cmd` should be used as-is to avoid double - // escaping. + // ArgsEscaped + // + // Deprecated: This field is present only for legacy compatibility with + // Docker and should not be used by new image builders. It is used by Docker + // for Windows images to indicate that the `Entrypoint` or `Cmd` or both, + // contains only a single element array, that is a pre-escaped, and combined + // into a single string `CommandLine`. If `true` the value in `Entrypoint` or + // `Cmd` should be used as-is to avoid double escaping. 
+ // https://github.com/opencontainers/image-spec/pull/892 ArgsEscaped bool `json:"ArgsEscaped,omitempty"` } @@ -95,22 +97,8 @@ type Image struct { // Author defines the name and/or email address of the person or entity which created and is responsible for maintaining the image. Author string `json:"author,omitempty"` - // Architecture is the CPU architecture which the binaries in this image are built to run on. - Architecture string `json:"architecture"` - - // Variant is the variant of the specified CPU architecture which image binaries are intended to run on. - Variant string `json:"variant,omitempty"` - - // OS is the name of the operating system which the image is built to run on. - OS string `json:"os"` - - // OSVersion is an optional field specifying the operating system - // version, for example on Windows `10.0.14393.1066`. - OSVersion string `json:"os.version,omitempty"` - - // OSFeatures is an optional field specifying an array of strings, - // each listing a required OS feature (for example on Windows `win32k`). - OSFeatures []string `json:"os.features,omitempty"` + // Platform describes the platform which the image in the manifest runs on. + Platform // Config defines the execution parameters which should be used as a base when running a container using the image. Config ImageConfig `json:"config,omitempty"` diff --git a/vendor/github.com/opencontainers/image-spec/specs-go/v1/manifest.go b/vendor/github.com/opencontainers/image-spec/specs-go/v1/manifest.go index 730a09359..4ce7b54cc 100644 --- a/vendor/github.com/opencontainers/image-spec/specs-go/v1/manifest.go +++ b/vendor/github.com/opencontainers/image-spec/specs-go/v1/manifest.go @@ -23,6 +23,9 @@ type Manifest struct { // MediaType specifies the type of this document data structure e.g. `application/vnd.oci.image.manifest.v1+json` MediaType string `json:"mediaType,omitempty"` + // ArtifactType specifies the IANA media type of artifact when the manifest is used for an artifact. + ArtifactType string `json:"artifactType,omitempty"` + // Config references a configuration object for a container, by digest. // The referenced configuration object is a JSON blob that the runtime uses to set up the container. Config Descriptor `json:"config"` @@ -36,3 +39,11 @@ type Manifest struct { // Annotations contains arbitrary metadata for the image manifest. Annotations map[string]string `json:"annotations,omitempty"` } + +// ScratchDescriptor is the descriptor of a blob with content of `{}`. +var ScratchDescriptor = Descriptor{ + MediaType: MediaTypeScratch, + Digest: `sha256:44136fa355b3678a1146ad16f7e8649e94fb4fc21fe77e8310c060f61caaff8a`, + Size: 2, + Data: []byte(`{}`), +} diff --git a/vendor/github.com/opencontainers/image-spec/specs-go/v1/mediatype.go b/vendor/github.com/opencontainers/image-spec/specs-go/v1/mediatype.go index 935b481e3..5dd31255e 100644 --- a/vendor/github.com/opencontainers/image-spec/specs-go/v1/mediatype.go +++ b/vendor/github.com/opencontainers/image-spec/specs-go/v1/mediatype.go @@ -40,21 +40,36 @@ const ( // MediaTypeImageLayerNonDistributable is the media type for layers referenced by // the manifest but with distribution restrictions. + // + // Deprecated: Non-distributable layers are deprecated, and not recommended + // for future use. Implementations SHOULD NOT produce new non-distributable + // layers. 
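As a sketch of how the new ArtifactType field and ScratchDescriptor from the manifest.go hunk above fit together (hypothetical artifact type; assumes the rc.3 API exactly as vendored here):

package main

import (
	"encoding/json"
	"fmt"

	specs "github.com/opencontainers/image-spec/specs-go"
	v1 "github.com/opencontainers/image-spec/specs-go/v1"
)

func main() {
	m := v1.Manifest{
		Versioned:    specs.Versioned{SchemaVersion: 2},
		MediaType:    v1.MediaTypeImageManifest,
		ArtifactType: "application/vnd.example.config.v1+json", // hypothetical artifact type
		Config:       v1.ScratchDescriptor,                     // the `{}` blob defined above
		Layers:       []v1.Descriptor{v1.ScratchDescriptor},    // placeholder when the artifact has no content layers
	}
	b, _ := json.MarshalIndent(m, "", "  ")
	fmt.Println(string(b))
}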
+ // https://github.com/opencontainers/image-spec/pull/965 MediaTypeImageLayerNonDistributable = "application/vnd.oci.image.layer.nondistributable.v1.tar" // MediaTypeImageLayerNonDistributableGzip is the media type for // gzipped layers referenced by the manifest but with distribution // restrictions. + // + // Deprecated: Non-distributable layers are deprecated, and not recommended + // for future use. Implementations SHOULD NOT produce new non-distributable + // layers. + // https://github.com/opencontainers/image-spec/pull/965 MediaTypeImageLayerNonDistributableGzip = "application/vnd.oci.image.layer.nondistributable.v1.tar+gzip" // MediaTypeImageLayerNonDistributableZstd is the media type for zstd // compressed layers referenced by the manifest but with distribution // restrictions. + // + // Deprecated: Non-distributable layers are deprecated, and not recommended + // for future use. Implementations SHOULD NOT produce new non-distributable + // layers. + // https://github.com/opencontainers/image-spec/pull/965 MediaTypeImageLayerNonDistributableZstd = "application/vnd.oci.image.layer.nondistributable.v1.tar+zstd" // MediaTypeImageConfig specifies the media type for the image configuration. MediaTypeImageConfig = "application/vnd.oci.image.config.v1+json" - // MediaTypeArtifactManifest specifies the media type for a content descriptor. - MediaTypeArtifactManifest = "application/vnd.oci.artifact.manifest.v1+json" + // MediaTypeScratch specifies the media type for an unused blob containing the value `{}` + MediaTypeScratch = "application/vnd.oci.scratch.v1+json" ) diff --git a/vendor/github.com/opencontainers/image-spec/specs-go/version.go b/vendor/github.com/opencontainers/image-spec/specs-go/version.go index 1afd590fe..3d4119b44 100644 --- a/vendor/github.com/opencontainers/image-spec/specs-go/version.go +++ b/vendor/github.com/opencontainers/image-spec/specs-go/version.go @@ -25,7 +25,7 @@ const ( VersionPatch = 0 // VersionDev indicates development branch. Releases will be empty string. - VersionDev = "-dev" + VersionDev = "-rc.3" ) // Version is the specification version that the package types support. diff --git a/vendor/github.com/opencontainers/runtime-spec/specs-go/config.go b/vendor/github.com/opencontainers/runtime-spec/specs-go/config.go index 25f4e6e82..4e7717d53 100644 --- a/vendor/github.com/opencontainers/runtime-spec/specs-go/config.go +++ b/vendor/github.com/opencontainers/runtime-spec/specs-go/config.go @@ -33,6 +33,34 @@ type Spec struct { ZOS *ZOS `json:"zos,omitempty" platform:"zos"` } +// Scheduler represents the scheduling attributes for a process. It is based on +// the Linux sched_setattr(2) syscall. +type Scheduler struct { + // Policy represents the scheduling policy (e.g., SCHED_FIFO, SCHED_RR, SCHED_OTHER). + Policy LinuxSchedulerPolicy `json:"policy"` + + // Nice is the nice value for the process, which affects its priority. + Nice int32 `json:"nice,omitempty"` + + // Priority represents the static priority of the process. + Priority int32 `json:"priority,omitempty"` + + // Flags is an array of scheduling flags. + Flags []LinuxSchedulerFlag `json:"flags,omitempty"` + + // The following ones are used by the DEADLINE scheduler. + + // Runtime is the amount of time in nanoseconds during which the process + // is allowed to run in a given period. + Runtime uint64 `json:"runtime,omitempty"` + + // Deadline is the absolute deadline for the process to complete its execution. 
+	Deadline uint64 `json:"deadline,omitempty"`
+
+	// Period is the length of the period in nanoseconds used for determining the process runtime.
+	Period uint64 `json:"period,omitempty"`
+}
+
// Process contains information to start a specific application inside the container.
type Process struct {
	// Terminal creates an interactive terminal for the container.
@@ -60,8 +88,12 @@ type Process struct {
	ApparmorProfile string `json:"apparmorProfile,omitempty" platform:"linux"`
	// Specify an oom_score_adj for the container.
	OOMScoreAdj *int `json:"oomScoreAdj,omitempty" platform:"linux"`
+	// Scheduler specifies the scheduling attributes for a process
+	Scheduler *Scheduler `json:"scheduler,omitempty" platform:"linux"`
	// SelinuxLabel specifies the selinux context that the container process is run as.
	SelinuxLabel string `json:"selinuxLabel,omitempty" platform:"linux"`
+	// IOPriority contains the I/O priority settings for the cgroup.
+	IOPriority *LinuxIOPriority `json:"ioPriority,omitempty" platform:"linux"`
}

// LinuxCapabilities specifies the list of allowed capabilities that are kept for a process.
@@ -79,6 +111,22 @@ type LinuxCapabilities struct {
	Ambient []string `json:"ambient,omitempty" platform:"linux"`
}

+// LinuxIOPriority represents I/O priority settings for the container's processes within the process group.
+type LinuxIOPriority struct {
+	Class    IOPriorityClass `json:"class"`
+	Priority int             `json:"priority"`
+}
+
+// IOPriorityClass represents an I/O scheduling class.
+type IOPriorityClass string
+
+// Possible values for IOPriorityClass.
+const (
+	IOPRIO_CLASS_RT   IOPriorityClass = "IOPRIO_CLASS_RT"
+	IOPRIO_CLASS_BE   IOPriorityClass = "IOPRIO_CLASS_BE"
+	IOPRIO_CLASS_IDLE IOPriorityClass = "IOPRIO_CLASS_IDLE"
+)
+
// Box specifies dimensions of a rectangle. Used for specifying the size of a console.
type Box struct {
	// Height is the vertical dimension of a box.
@@ -789,3 +837,43 @@ type ZOSDevice struct {
	// Gid of the device.
	GID *uint32 `json:"gid,omitempty"`
}
+
+// LinuxSchedulerPolicy represents different scheduling policies used with the Linux Scheduler
+type LinuxSchedulerPolicy string
+
+const (
+	// SchedOther is the default scheduling policy
+	SchedOther LinuxSchedulerPolicy = "SCHED_OTHER"
+	// SchedFIFO is the First-In-First-Out scheduling policy
+	SchedFIFO LinuxSchedulerPolicy = "SCHED_FIFO"
+	// SchedRR is the Round-Robin scheduling policy
+	SchedRR LinuxSchedulerPolicy = "SCHED_RR"
+	// SchedBatch is the Batch scheduling policy
+	SchedBatch LinuxSchedulerPolicy = "SCHED_BATCH"
+	// SchedISO is the Isolation scheduling policy
+	SchedISO LinuxSchedulerPolicy = "SCHED_ISO"
+	// SchedIdle is the Idle scheduling policy
+	SchedIdle LinuxSchedulerPolicy = "SCHED_IDLE"
+	// SchedDeadline is the Deadline scheduling policy
+	SchedDeadline LinuxSchedulerPolicy = "SCHED_DEADLINE"
+)
+
+// LinuxSchedulerFlag represents the flags used by the Linux Scheduler.
+type LinuxSchedulerFlag string
+
+const (
+	// SchedFlagResetOnFork represents the reset on fork scheduling flag
+	SchedFlagResetOnFork LinuxSchedulerFlag = "SCHED_FLAG_RESET_ON_FORK"
+	// SchedFlagReclaim represents the reclaim scheduling flag
+	SchedFlagReclaim LinuxSchedulerFlag = "SCHED_FLAG_RECLAIM"
+	// SchedFlagDLOverrun represents the deadline overrun scheduling flag
+	SchedFlagDLOverrun LinuxSchedulerFlag = "SCHED_FLAG_DL_OVERRUN"
+	// SchedFlagKeepPolicy represents the keep policy scheduling flag
+	SchedFlagKeepPolicy LinuxSchedulerFlag = "SCHED_FLAG_KEEP_POLICY"
+	// SchedFlagKeepParams represents the keep parameters scheduling flag
+	SchedFlagKeepParams LinuxSchedulerFlag = "SCHED_FLAG_KEEP_PARAMS"
+	// SchedFlagUtilClampMin represents the utilization clamp minimum scheduling flag
+	SchedFlagUtilClampMin LinuxSchedulerFlag = "SCHED_FLAG_UTIL_CLAMP_MIN"
+	// SchedFlagUtilClampMax represents the utilization clamp maximum scheduling flag
+	SchedFlagUtilClampMax LinuxSchedulerFlag = "SCHED_FLAG_UTIL_CLAMP_MAX"
+)
diff --git a/vendor/github.com/opencontainers/runtime-spec/specs-go/version.go b/vendor/github.com/opencontainers/runtime-spec/specs-go/version.go
index 1b81f3c9d..41933fb17 100644
--- a/vendor/github.com/opencontainers/runtime-spec/specs-go/version.go
+++ b/vendor/github.com/opencontainers/runtime-spec/specs-go/version.go
@@ -11,7 +11,7 @@ const (
	VersionPatch = 0

	// VersionDev indicates development branch. Releases will be empty string.
-	VersionDev = "-rc.2"
+	VersionDev = "-rc.3"
)

// Version is the specification version that the package types support.
diff --git a/vendor/github.com/sigstore/fulcio/pkg/certificate/extensions.go b/vendor/github.com/sigstore/fulcio/pkg/certificate/extensions.go
index 3ffbcc91f..53d4d6626 100644
--- a/vendor/github.com/sigstore/fulcio/pkg/certificate/extensions.go
+++ b/vendor/github.com/sigstore/fulcio/pkg/certificate/extensions.go
@@ -344,59 +344,59 @@ func parseExtensions(ext []pkix.Extension) (Extensions, error) {
			out.GithubWorkflowRef = string(e.Value)
		// END: Deprecated
		case e.Id.Equal(OIDIssuerV2):
-			if err := parseDERString(e.Value, &out.Issuer); err != nil {
+			if err := ParseDERString(e.Value, &out.Issuer); err != nil {
				return Extensions{}, err
			}
		case e.Id.Equal(OIDBuildSignerURI):
-			if err := parseDERString(e.Value, &out.BuildSignerURI); err != nil {
+			if err := ParseDERString(e.Value, &out.BuildSignerURI); err != nil {
				return Extensions{}, err
			}
		case e.Id.Equal(OIDBuildSignerDigest):
-			if err := parseDERString(e.Value, &out.BuildSignerDigest); err != nil {
+			if err := ParseDERString(e.Value, &out.BuildSignerDigest); err != nil {
				return Extensions{}, err
			}
		case e.Id.Equal(OIDRunnerEnvironment):
-			if err := parseDERString(e.Value, &out.RunnerEnvironment); err != nil {
+			if err := ParseDERString(e.Value, &out.RunnerEnvironment); err != nil {
				return Extensions{}, err
			}
		case e.Id.Equal(OIDSourceRepositoryURI):
-			if err := parseDERString(e.Value, &out.SourceRepositoryURI); err != nil {
+			if err := ParseDERString(e.Value, &out.SourceRepositoryURI); err != nil {
				return Extensions{}, err
			}
		case e.Id.Equal(OIDSourceRepositoryDigest):
-			if err := parseDERString(e.Value, &out.SourceRepositoryDigest); err != nil {
+			if err := ParseDERString(e.Value, &out.SourceRepositoryDigest); err != nil {
				return Extensions{}, err
			}
		case e.Id.Equal(OIDSourceRepositoryRef):
-			if err := parseDERString(e.Value, &out.SourceRepositoryRef); err != nil {
+			if err := ParseDERString(e.Value, &out.SourceRepositoryRef); err != nil {
				return Extensions{}, err
			}
		case
e.Id.Equal(OIDSourceRepositoryIdentifier): - if err := parseDERString(e.Value, &out.SourceRepositoryIdentifier); err != nil { + if err := ParseDERString(e.Value, &out.SourceRepositoryIdentifier); err != nil { return Extensions{}, err } case e.Id.Equal(OIDSourceRepositoryOwnerURI): - if err := parseDERString(e.Value, &out.SourceRepositoryOwnerURI); err != nil { + if err := ParseDERString(e.Value, &out.SourceRepositoryOwnerURI); err != nil { return Extensions{}, err } case e.Id.Equal(OIDSourceRepositoryOwnerIdentifier): - if err := parseDERString(e.Value, &out.SourceRepositoryOwnerIdentifier); err != nil { + if err := ParseDERString(e.Value, &out.SourceRepositoryOwnerIdentifier); err != nil { return Extensions{}, err } case e.Id.Equal(OIDBuildConfigURI): - if err := parseDERString(e.Value, &out.BuildConfigURI); err != nil { + if err := ParseDERString(e.Value, &out.BuildConfigURI); err != nil { return Extensions{}, err } case e.Id.Equal(OIDBuildConfigDigest): - if err := parseDERString(e.Value, &out.BuildConfigDigest); err != nil { + if err := ParseDERString(e.Value, &out.BuildConfigDigest); err != nil { return Extensions{}, err } case e.Id.Equal(OIDBuildTrigger): - if err := parseDERString(e.Value, &out.BuildTrigger); err != nil { + if err := ParseDERString(e.Value, &out.BuildTrigger); err != nil { return Extensions{}, err } case e.Id.Equal(OIDRunInvocationURI): - if err := parseDERString(e.Value, &out.RunInvocationURI); err != nil { + if err := ParseDERString(e.Value, &out.RunInvocationURI); err != nil { return Extensions{}, err } } @@ -407,9 +407,9 @@ func parseExtensions(ext []pkix.Extension) (Extensions, error) { return out, nil } -// parseDERString decodes a DER-encoded string and puts the value in parsedVal. -// Rerturns an error if the unmarshalling fails or if there are trailing bytes in the encoding. -func parseDERString(val []byte, parsedVal *string) error { +// ParseDERString decodes a DER-encoded string and puts the value in parsedVal. +// Returns an error if the unmarshalling fails or if there are trailing bytes in the encoding. +func ParseDERString(val []byte, parsedVal *string) error { rest, err := asn1.Unmarshal(val, parsedVal) if err != nil { return fmt.Errorf("unexpected error unmarshalling DER-encoded string: %v", err) diff --git a/vendor/github.com/sigstore/sigstore/pkg/signature/payload/payload.go b/vendor/github.com/sigstore/sigstore/pkg/signature/payload/payload.go index 8b61aa15f..2764b4b31 100644 --- a/vendor/github.com/sigstore/sigstore/pkg/signature/payload/payload.go +++ b/vendor/github.com/sigstore/sigstore/pkg/signature/payload/payload.go @@ -52,16 +52,32 @@ type Image struct { // Cosign describes a container image signed using Cosign type Cosign struct { - Image name.Digest - Annotations map[string]interface{} + Image name.Digest + // ClaimedIdentity is what the signer claims the image to be; usually a registry.com/…/repo:tag, but can also use a digest instead. + // ALMOST ALL consumers MUST verify that ClaimedIdentity in the signature is correct given how user refers to the image; + // e.g. 
if the user asks to access a signed image example.com/repo/mysql:3.14, + // it is ALMOST ALWAYS necessary to validate that ClaimedIdentity = example.com/repo/mysql:3.14 + // + // Considerations: + // - The user might refer to an image using a digest (example.com/repo/mysql@sha256:…); in that case the registry/…/repo should still match + // - If the image is multi-arch, ClaimedIdentity usually refers to the top-level multi-arch image index also on the per-arch images + // (possibly even if ClaimedIdentity contains a digest!) + // - Older versions of cosign generate signatures where ClaimedIdentity only contains a registry/…/repo ; signature consumers should allow users + // to determine whether such images should be accepted (and, long-term, the default SHOULD be to reject them) + ClaimedIdentity string + Annotations map[string]interface{} } // SimpleContainerImage returns information about a container image in the github.com/containers/image/signature format func (p Cosign) SimpleContainerImage() SimpleContainerImage { + dockerReference := p.Image.Repository.Name() + if p.ClaimedIdentity != "" { + dockerReference = p.ClaimedIdentity + } return SimpleContainerImage{ Critical: Critical{ Identity: Identity{ - DockerReference: p.Image.Repository.Name(), + DockerReference: dockerReference, }, Image: Image{ DockerManifestDigest: p.Image.DigestStr(), @@ -98,6 +114,7 @@ func (p *Cosign) UnmarshalJSON(data []byte) error { return fmt.Errorf("could not parse image digest string %q: %w", digestStr, err) } p.Image = digest + p.ClaimedIdentity = simple.Critical.Identity.DockerReference p.Annotations = simple.Optional return nil } diff --git a/vendor/github.com/sylabs/sif/v2/pkg/sif/create.go b/vendor/github.com/sylabs/sif/v2/pkg/sif/create.go index 2c15eeab7..0e1f9103d 100644 --- a/vendor/github.com/sylabs/sif/v2/pkg/sif/create.go +++ b/vendor/github.com/sylabs/sif/v2/pkg/sif/create.go @@ -622,60 +622,52 @@ func (f *FileImage) SetPrimPart(id uint32, opts ...SetOpt) error { return fmt.Errorf("%w", errNotPartition) } - fs, pt, arch, err := descr.getPartitionMetadata() - if err != nil { + var p partition + if err := descr.getExtra(binaryUnmarshaler{&p}); err != nil { return fmt.Errorf("%w", err) } // if already primary system partition, nothing to do - if pt == PartPrimSys { + if p.Parttype == PartPrimSys { return nil } - if pt != PartSystem { + if p.Parttype != PartSystem { return fmt.Errorf("%w", errNotSystem) } - olddescr, err := f.getDescriptor(WithPartitionType(PartPrimSys)) - if err != nil && !errors.Is(err, ErrObjectNotFound) { - return fmt.Errorf("%w", err) - } - extra := partition{ - Fstype: fs, - Parttype: PartPrimSys, - } - copy(extra.Arch[:], arch) - - if err := descr.setExtra(extra); err != nil { - return fmt.Errorf("%w", err) - } - - descr.ModifiedAt = so.t.Unix() - - if olddescr != nil { - oldfs, _, oldarch, err := olddescr.getPartitionMetadata() - if err != nil { + // If there is currently a primary system partition, update it. 
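The ClaimedIdentity comment above places the identity check on signature consumers rather than on this package. A minimal sketch of that check, assuming plain string identities; the helper name is hypothetical, and a real policy also needs the extra cases the comment lists (digest references, multi-arch indexes, legacy repo-only identities):

package main

import (
	"errors"
	"fmt"
)

// verifyClaimedIdentity is a hypothetical consumer-side gate: the identity
// claimed inside the signature payload must match the reference the user
// asked for. Older cosign signatures carry only "registry/…/repo"; whether
// to accept those is a separate policy decision.
func verifyClaimedIdentity(claimed, userRef string) error {
	if claimed == "" {
		return errors.New("signature carries no claimed identity")
	}
	if claimed != userRef {
		return fmt.Errorf("signature claims %q, user requested %q", claimed, userRef)
	}
	return nil
}

func main() {
	fmt.Println(verifyClaimedIdentity("example.com/repo/mysql:3.14", "example.com/repo/mysql:3.14")) // <nil>
	fmt.Println(verifyClaimedIdentity("example.com/attacker:latest", "example.com/repo/mysql:3.14"))  // mismatch error
}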
+ if d, err := f.getDescriptor(WithPartitionType(PartPrimSys)); err == nil { + var p partition + if err := d.getExtra(binaryUnmarshaler{&p}); err != nil { return fmt.Errorf("%w", err) } - oldextra := partition{ - Fstype: oldfs, - Parttype: PartSystem, - Arch: getSIFArch(oldarch), - } + p.Parttype = PartSystem - if err := olddescr.setExtra(oldextra); err != nil { + if err := d.setExtra(p); err != nil { return fmt.Errorf("%w", err) } - olddescr.ModifiedAt = so.t.Unix() + d.ModifiedAt = so.t.Unix() + } else if !errors.Is(err, ErrObjectNotFound) { + return fmt.Errorf("%w", err) } + // Update the descriptor of the new primary system partition. + p.Parttype = PartPrimSys + + if err := descr.setExtra(p); err != nil { + return fmt.Errorf("%w", err) + } + + descr.ModifiedAt = so.t.Unix() + if err := f.writeDescriptors(); err != nil { return fmt.Errorf("%w", err) } - f.h.Arch = getSIFArch(arch) + f.h.Arch = p.Arch f.h.ModifiedAt = so.t.Unix() if err := f.writeHeader(); err != nil { diff --git a/vendor/github.com/vbauerster/mpb/v8/README.md b/vendor/github.com/vbauerster/mpb/v8/README.md index d5f787276..09825ca08 100644 --- a/vendor/github.com/vbauerster/mpb/v8/README.md +++ b/vendor/github.com/vbauerster/mpb/v8/README.md @@ -82,8 +82,8 @@ func main() { mpb.AppendDecorators( // replace ETA decorator with "done" message, OnComplete event decor.OnComplete( - // ETA decorator with ewma age of 60 - decor.EwmaETA(decor.ET_STYLE_GO, 60, decor.WCSyncWidth), "done", + // ETA decorator with ewma age of 30 + decor.EwmaETA(decor.ET_STYLE_GO, 30, decor.WCSyncWidth), "done", ), ), ) diff --git a/vendor/github.com/vbauerster/mpb/v8/decor/counters.go b/vendor/github.com/vbauerster/mpb/v8/decor/counters.go index 331c3df67..042027578 100644 --- a/vendor/github.com/vbauerster/mpb/v8/decor/counters.go +++ b/vendor/github.com/vbauerster/mpb/v8/decor/counters.go @@ -2,13 +2,6 @@ package decor import ( "fmt" - "strings" -) - -const ( - _ = iota - UnitKiB - UnitKB ) // CountersNoUnit is a wrapper around Counters with no unit param. @@ -17,54 +10,60 @@ func CountersNoUnit(pairFmt string, wcc ...WC) Decorator { } // CountersKibiByte is a wrapper around Counters with predefined unit -// UnitKiB (bytes/1024). +// as SizeB1024(0). func CountersKibiByte(pairFmt string, wcc ...WC) Decorator { - return Counters(UnitKiB, pairFmt, wcc...) + return Counters(SizeB1024(0), pairFmt, wcc...) } // CountersKiloByte is a wrapper around Counters with predefined unit -// UnitKB (bytes/1000). +// as SizeB1000(0). func CountersKiloByte(pairFmt string, wcc ...WC) Decorator { - return Counters(UnitKB, pairFmt, wcc...) + return Counters(SizeB1000(0), pairFmt, wcc...) } // Counters decorator with dynamic unit measure adjustment. 
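The counters.go changes around this point replace the package-level UnitKiB/UnitKB integers with the typed SizeB1024/SizeB1000 units; the same swap repeats below for Total, Current, InvertedCurrent, and the speed decorators. A sketch of a caller under the new signature, assuming the mpb v8 API as vendored here:

package main

import (
	"time"

	"github.com/vbauerster/mpb/v8"
	"github.com/vbauerster/mpb/v8/decor"
)

func main() {
	p := mpb.New()
	total := int64(64 * 1024 * 1024)
	bar := p.New(total, mpb.BarStyle(),
		// decor.SizeB1024(0) selects IEC units (MiB); decor.SizeB1000(0)
		// would select SI units (MB); passing 0 keeps plain numbers.
		mpb.AppendDecorators(decor.Counters(decor.SizeB1024(0), "% .1f / % .1f")),
	)
	for i := 0; i < 64; i++ {
		bar.IncrBy(1024 * 1024)
		time.Sleep(5 * time.Millisecond)
	}
	p.Wait()
}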
// -// `unit` one of [0|UnitKiB|UnitKB] zero for no unit +// `unit` one of [0|SizeB1024(0)|SizeB1000(0)] // -// `pairFmt` printf compatible verbs for current and total pair +// `pairFmt` printf compatible verbs for current and total // // `wcc` optional WC config // -// pairFmt example if unit=UnitKB: +// pairFmt example if unit=SizeB1000(0): // -// pairFmt="%.1f / %.1f" output: "1.0MB / 12.0MB" -// pairFmt="% .1f / % .1f" output: "1.0 MB / 12.0 MB" // pairFmt="%d / %d" output: "1MB / 12MB" // pairFmt="% d / % d" output: "1 MB / 12 MB" -func Counters(unit int, pairFmt string, wcc ...WC) Decorator { - producer := func(unit int, pairFmt string) DecorFunc { - if pairFmt == "" { - pairFmt = "%d / %d" - } else if strings.Count(pairFmt, "%") != 2 { - panic("expected pairFmt with exactly 2 verbs") - } - switch unit { - case UnitKiB: +// pairFmt="%.1f / %.1f" output: "1.0MB / 12.0MB" +// pairFmt="% .1f / % .1f" output: "1.0 MB / 12.0 MB" +// pairFmt="%f / %f" output: "1.000000MB / 12.000000MB" +// pairFmt="% f / % f" output: "1.000000 MB / 12.000000 MB" +func Counters(unit interface{}, pairFmt string, wcc ...WC) Decorator { + producer := func() DecorFunc { + switch unit.(type) { + case SizeB1024: + if pairFmt == "" { + pairFmt = "% d / % d" + } return func(s Statistics) string { return fmt.Sprintf(pairFmt, SizeB1024(s.Current), SizeB1024(s.Total)) } - case UnitKB: + case SizeB1000: + if pairFmt == "" { + pairFmt = "% d / % d" + } return func(s Statistics) string { return fmt.Sprintf(pairFmt, SizeB1000(s.Current), SizeB1000(s.Total)) } default: + if pairFmt == "" { + pairFmt = "%d / %d" + } return func(s Statistics) string { return fmt.Sprintf(pairFmt, s.Current, s.Total) } } } - return Any(producer(unit, pairFmt), wcc...) + return Any(producer(), wcc...) } // TotalNoUnit is a wrapper around Total with no unit param. @@ -73,55 +72,60 @@ func TotalNoUnit(format string, wcc ...WC) Decorator { } // TotalKibiByte is a wrapper around Total with predefined unit -// UnitKiB (bytes/1024). +// as SizeB1024(0). func TotalKibiByte(format string, wcc ...WC) Decorator { - return Total(UnitKiB, format, wcc...) + return Total(SizeB1024(0), format, wcc...) } // TotalKiloByte is a wrapper around Total with predefined unit -// UnitKB (bytes/1000). +// as SizeB1000(0). func TotalKiloByte(format string, wcc ...WC) Decorator { - return Total(UnitKB, format, wcc...) + return Total(SizeB1000(0), format, wcc...) } // Total decorator with dynamic unit measure adjustment. 
// -// `unit` one of [0|UnitKiB|UnitKB] zero for no unit +// `unit` one of [0|SizeB1024(0)|SizeB1000(0)] // // `format` printf compatible verb for Total // // `wcc` optional WC config // -// format example if unit=UnitKiB: +// format example if unit=SizeB1024(0): // -// format="%.1f" output: "12.0MiB" -// format="% .1f" output: "12.0 MiB" // format="%d" output: "12MiB" // format="% d" output: "12 MiB" -func Total(unit int, format string, wcc ...WC) Decorator { - producer := func(unit int, format string) DecorFunc { - if format == "" { - format = "%d" - } else if strings.Count(format, "%") != 1 { - panic("expected format with exactly 1 verb") - } - - switch unit { - case UnitKiB: +// format="%.1f" output: "12.0MiB" +// format="% .1f" output: "12.0 MiB" +// format="%f" output: "12.000000MiB" +// format="% f" output: "12.000000 MiB" +func Total(unit interface{}, format string, wcc ...WC) Decorator { + producer := func() DecorFunc { + switch unit.(type) { + case SizeB1024: + if format == "" { + format = "% d" + } return func(s Statistics) string { return fmt.Sprintf(format, SizeB1024(s.Total)) } - case UnitKB: + case SizeB1000: + if format == "" { + format = "% d" + } return func(s Statistics) string { return fmt.Sprintf(format, SizeB1000(s.Total)) } default: + if format == "" { + format = "%d" + } return func(s Statistics) string { return fmt.Sprintf(format, s.Total) } } } - return Any(producer(unit, format), wcc...) + return Any(producer(), wcc...) } // CurrentNoUnit is a wrapper around Current with no unit param. @@ -130,55 +134,60 @@ func CurrentNoUnit(format string, wcc ...WC) Decorator { } // CurrentKibiByte is a wrapper around Current with predefined unit -// UnitKiB (bytes/1024). +// as SizeB1024(0). func CurrentKibiByte(format string, wcc ...WC) Decorator { - return Current(UnitKiB, format, wcc...) + return Current(SizeB1024(0), format, wcc...) } // CurrentKiloByte is a wrapper around Current with predefined unit -// UnitKB (bytes/1000). +// as SizeB1000(0). func CurrentKiloByte(format string, wcc ...WC) Decorator { - return Current(UnitKB, format, wcc...) + return Current(SizeB1000(0), format, wcc...) } // Current decorator with dynamic unit measure adjustment. 
// -// `unit` one of [0|UnitKiB|UnitKB] zero for no unit +// `unit` one of [0|SizeB1024(0)|SizeB1000(0)] // // `format` printf compatible verb for Current // // `wcc` optional WC config // -// format example if unit=UnitKiB: +// format example if unit=SizeB1024(0): // -// format="%.1f" output: "12.0MiB" -// format="% .1f" output: "12.0 MiB" // format="%d" output: "12MiB" // format="% d" output: "12 MiB" -func Current(unit int, format string, wcc ...WC) Decorator { - producer := func(unit int, format string) DecorFunc { - if format == "" { - format = "%d" - } else if strings.Count(format, "%") != 1 { - panic("expected format with exactly 1 verb") - } - - switch unit { - case UnitKiB: +// format="%.1f" output: "12.0MiB" +// format="% .1f" output: "12.0 MiB" +// format="%f" output: "12.000000MiB" +// format="% f" output: "12.000000 MiB" +func Current(unit interface{}, format string, wcc ...WC) Decorator { + producer := func() DecorFunc { + switch unit.(type) { + case SizeB1024: + if format == "" { + format = "% d" + } return func(s Statistics) string { return fmt.Sprintf(format, SizeB1024(s.Current)) } - case UnitKB: + case SizeB1000: + if format == "" { + format = "% d" + } return func(s Statistics) string { return fmt.Sprintf(format, SizeB1000(s.Current)) } default: + if format == "" { + format = "%d" + } return func(s Statistics) string { return fmt.Sprintf(format, s.Current) } } } - return Any(producer(unit, format), wcc...) + return Any(producer(), wcc...) } // InvertedCurrentNoUnit is a wrapper around InvertedCurrent with no unit param. @@ -187,53 +196,58 @@ func InvertedCurrentNoUnit(format string, wcc ...WC) Decorator { } // InvertedCurrentKibiByte is a wrapper around InvertedCurrent with predefined unit -// UnitKiB (bytes/1024). +// as SizeB1024(0). func InvertedCurrentKibiByte(format string, wcc ...WC) Decorator { - return InvertedCurrent(UnitKiB, format, wcc...) + return InvertedCurrent(SizeB1024(0), format, wcc...) } // InvertedCurrentKiloByte is a wrapper around InvertedCurrent with predefined unit -// UnitKB (bytes/1000). +// as SizeB1000(0). func InvertedCurrentKiloByte(format string, wcc ...WC) Decorator { - return InvertedCurrent(UnitKB, format, wcc...) + return InvertedCurrent(SizeB1000(0), format, wcc...) } // InvertedCurrent decorator with dynamic unit measure adjustment. 
// -// `unit` one of [0|UnitKiB|UnitKB] zero for no unit +// `unit` one of [0|SizeB1024(0)|SizeB1000(0)] // // `format` printf compatible verb for InvertedCurrent // // `wcc` optional WC config // -// format example if unit=UnitKiB: +// format example if unit=SizeB1024(0): // -// format="%.1f" output: "12.0MiB" -// format="% .1f" output: "12.0 MiB" // format="%d" output: "12MiB" // format="% d" output: "12 MiB" -func InvertedCurrent(unit int, format string, wcc ...WC) Decorator { - producer := func(unit int, format string) DecorFunc { - if format == "" { - format = "%d" - } else if strings.Count(format, "%") != 1 { - panic("expected format with exactly 1 verb") - } - - switch unit { - case UnitKiB: +// format="%.1f" output: "12.0MiB" +// format="% .1f" output: "12.0 MiB" +// format="%f" output: "12.000000MiB" +// format="% f" output: "12.000000 MiB" +func InvertedCurrent(unit interface{}, format string, wcc ...WC) Decorator { + producer := func() DecorFunc { + switch unit.(type) { + case SizeB1024: + if format == "" { + format = "% d" + } return func(s Statistics) string { return fmt.Sprintf(format, SizeB1024(s.Total-s.Current)) } - case UnitKB: + case SizeB1000: + if format == "" { + format = "% d" + } return func(s Statistics) string { return fmt.Sprintf(format, SizeB1000(s.Total-s.Current)) } default: + if format == "" { + format = "%d" + } return func(s Statistics) string { return fmt.Sprintf(format, s.Total-s.Current) } } } - return Any(producer(unit, format), wcc...) + return Any(producer(), wcc...) } diff --git a/vendor/github.com/vbauerster/mpb/v8/decor/eta.go b/vendor/github.com/vbauerster/mpb/v8/decor/eta.go index 3594e0185..e33631dab 100644 --- a/vendor/github.com/vbauerster/mpb/v8/decor/eta.go +++ b/vendor/github.com/vbauerster/mpb/v8/decor/eta.go @@ -199,8 +199,7 @@ func chooseTimeProducer(style TimeStyle) func(time.Duration) string { } default: return func(remaining time.Duration) string { - // strip off nanoseconds - return ((remaining / time.Second) * time.Second).String() + return remaining.Truncate(time.Second).String() } } } diff --git a/vendor/github.com/vbauerster/mpb/v8/decor/on_abort.go b/vendor/github.com/vbauerster/mpb/v8/decor/on_abort.go index 862ae33d2..65a8d9dae 100644 --- a/vendor/github.com/vbauerster/mpb/v8/decor/on_abort.go +++ b/vendor/github.com/vbauerster/mpb/v8/decor/on_abort.go @@ -34,8 +34,7 @@ type onAbortWrapper struct { func (d *onAbortWrapper) Decor(s Statistics) string { if s.Aborted { - wc := d.GetConf() - return wc.FormatMsg(d.msg) + return d.GetConf().FormatMsg(d.msg) } return d.Decorator.Decor(s) } diff --git a/vendor/github.com/vbauerster/mpb/v8/decor/on_complete.go b/vendor/github.com/vbauerster/mpb/v8/decor/on_complete.go index 6ee926844..0a3897b81 100644 --- a/vendor/github.com/vbauerster/mpb/v8/decor/on_complete.go +++ b/vendor/github.com/vbauerster/mpb/v8/decor/on_complete.go @@ -33,8 +33,7 @@ type onCompleteWrapper struct { func (d *onCompleteWrapper) Decor(s Statistics) string { if s.Completed { - wc := d.GetConf() - return wc.FormatMsg(d.msg) + return d.GetConf().FormatMsg(d.msg) } return d.Decorator.Decor(s) } diff --git a/vendor/github.com/vbauerster/mpb/v8/decor/percentage.go b/vendor/github.com/vbauerster/mpb/v8/decor/percentage.go index 1d3b3a9e0..9709c196c 100644 --- a/vendor/github.com/vbauerster/mpb/v8/decor/percentage.go +++ b/vendor/github.com/vbauerster/mpb/v8/decor/percentage.go @@ -7,24 +7,25 @@ import ( "github.com/vbauerster/mpb/v8/internal" ) +var _ fmt.Formatter = percentageType(0) + type percentageType float64 func 
(s percentageType) Format(st fmt.State, verb rune) { - var prec int + prec := -1 switch verb { - case 'd': - case 's': - prec = -1 - default: + case 'f', 'e', 'E': + prec = 6 // default prec of fmt.Printf("%f|%e|%E") + fallthrough + case 'b', 'g', 'G', 'x', 'X': if p, ok := st.Precision(); ok { prec = p - } else { - prec = 6 } + default: + verb, prec = 'f', 0 } - p := bytesPool.Get().(*[]byte) - b := strconv.AppendFloat(*p, float64(s), 'f', prec, 64) + b := strconv.AppendFloat(make([]byte, 0, 16), float64(s), byte(verb), prec, 64) if st.Flag(' ') { b = append(b, ' ', '%') } else { @@ -34,7 +35,6 @@ func (s percentageType) Format(st fmt.State, verb rune) { if err != nil { panic(err) } - bytesPool.Put(p) } // Percentage returns percentage decorator. It's a wrapper of NewPercentage. @@ -44,12 +44,18 @@ func Percentage(wcc ...WC) Decorator { // NewPercentage percentage decorator with custom format string. // +// `format` printf compatible verb +// +// `wcc` optional WC config +// // format examples: // -// format="%.1f" output: "1.0%" -// format="% .1f" output: "1.0 %" // format="%d" output: "1%" // format="% d" output: "1 %" +// format="%.1f" output: "1.0%" +// format="% .1f" output: "1.0 %" +// format="%f" output: "1.000000%" +// format="% f" output: "1.000000 %" func NewPercentage(format string, wcc ...WC) Decorator { if format == "" { format = "% d" diff --git a/vendor/github.com/vbauerster/mpb/v8/decor/pool.go b/vendor/github.com/vbauerster/mpb/v8/decor/pool.go deleted file mode 100644 index cefa9bfac..000000000 --- a/vendor/github.com/vbauerster/mpb/v8/decor/pool.go +++ /dev/null @@ -1,10 +0,0 @@ -package decor - -import "sync" - -var bytesPool = sync.Pool{ - New: func() interface{} { - b := make([]byte, 0, 32) - return &b - }, -} diff --git a/vendor/github.com/vbauerster/mpb/v8/decor/size_type.go b/vendor/github.com/vbauerster/mpb/v8/decor/size_type.go index 4df27095f..d9950b61c 100644 --- a/vendor/github.com/vbauerster/mpb/v8/decor/size_type.go +++ b/vendor/github.com/vbauerster/mpb/v8/decor/size_type.go @@ -8,6 +8,13 @@ import ( //go:generate stringer -type=SizeB1024 -trimprefix=_i //go:generate stringer -type=SizeB1000 -trimprefix=_ +var ( + _ fmt.Formatter = SizeB1024(0) + _ fmt.Stringer = SizeB1024(0) + _ fmt.Formatter = SizeB1000(0) + _ fmt.Stringer = SizeB1000(0) +) + const ( _ib SizeB1024 = iota + 1 _iKiB SizeB1024 = 1 << (iota * 10) @@ -22,17 +29,17 @@ const ( type SizeB1024 int64 func (self SizeB1024) Format(st fmt.State, verb rune) { - var prec int + prec := -1 switch verb { - case 'd': - case 's': - prec = -1 - default: + case 'f', 'e', 'E': + prec = 6 // default prec of fmt.Printf("%f|%e|%E") + fallthrough + case 'b', 'g', 'G', 'x', 'X': if p, ok := st.Precision(); ok { prec = p - } else { - prec = 6 } + default: + verb, prec = 'f', 0 } var unit SizeB1024 @@ -49,8 +56,7 @@ func (self SizeB1024) Format(st fmt.State, verb rune) { unit = _iTiB } - p := bytesPool.Get().(*[]byte) - b := strconv.AppendFloat(*p, float64(self)/float64(unit), 'f', prec, 64) + b := strconv.AppendFloat(make([]byte, 0, 24), float64(self)/float64(unit), byte(verb), prec, 64) if st.Flag(' ') { b = append(b, ' ') } @@ -59,7 +65,6 @@ func (self SizeB1024) Format(st fmt.State, verb rune) { if err != nil { panic(err) } - bytesPool.Put(p) } const ( @@ -76,17 +81,17 @@ const ( type SizeB1000 int64 func (self SizeB1000) Format(st fmt.State, verb rune) { - var prec int + prec := -1 switch verb { - case 'd': - case 's': - prec = -1 - default: + case 'f', 'e', 'E': + prec = 6 // default prec of 
fmt.Printf("%f|%e|%E") + fallthrough + case 'b', 'g', 'G', 'x', 'X': if p, ok := st.Precision(); ok { prec = p - } else { - prec = 6 } + default: + verb, prec = 'f', 0 } var unit SizeB1000 @@ -103,8 +108,7 @@ func (self SizeB1000) Format(st fmt.State, verb rune) { unit = _TB } - p := bytesPool.Get().(*[]byte) - b := strconv.AppendFloat(*p, float64(self)/float64(unit), 'f', prec, 64) + b := strconv.AppendFloat(make([]byte, 0, 24), float64(self)/float64(unit), byte(verb), prec, 64) if st.Flag(' ') { b = append(b, ' ') } @@ -113,5 +117,4 @@ func (self SizeB1000) Format(st fmt.State, verb rune) { if err != nil { panic(err) } - bytesPool.Put(p) } diff --git a/vendor/github.com/vbauerster/mpb/v8/decor/speed.go b/vendor/github.com/vbauerster/mpb/v8/decor/speed.go index dd0ad7001..d4f644704 100644 --- a/vendor/github.com/vbauerster/mpb/v8/decor/speed.go +++ b/vendor/github.com/vbauerster/mpb/v8/decor/speed.go @@ -39,7 +39,7 @@ func (self speedFormatter) Format(st fmt.State, verb rune) { // EwmaSpeed exponential-weighted-moving-average based speed decorator. // For this decorator to work correctly you have to measure each iteration's // duration and pass it to one of the (*Bar).EwmaIncr... family methods. -func EwmaSpeed(unit int, format string, age float64, wcc ...WC) Decorator { +func EwmaSpeed(unit interface{}, format string, age float64, wcc ...WC) Decorator { var average ewma.MovingAverage if age == 0 { average = ewma.NewMovingAverage() @@ -52,7 +52,7 @@ func EwmaSpeed(unit int, format string, age float64, wcc ...WC) Decorator { // MovingAverageSpeed decorator relies on MovingAverage implementation // to calculate its average. // -// `unit` one of [0|UnitKiB|UnitKB] zero for no unit +// `unit` one of [0|SizeB1024(0)|SizeB1000(0)] // // `format` printf compatible verb for value, like "%f" or "%d" // @@ -62,14 +62,11 @@ func EwmaSpeed(unit int, format string, age float64, wcc ...WC) Decorator { // // format examples: // -// unit=UnitKiB, format="%.1f" output: "1.0MiB/s" -// unit=UnitKiB, format="% .1f" output: "1.0 MiB/s" -// unit=UnitKB, format="%.1f" output: "1.0MB/s" -// unit=UnitKB, format="% .1f" output: "1.0 MB/s" -func MovingAverageSpeed(unit int, format string, average ewma.MovingAverage, wcc ...WC) Decorator { - if format == "" { - format = "%.0f" - } +// unit=SizeB1024(0), format="%.1f" output: "1.0MiB/s" +// unit=SizeB1024(0), format="% .1f" output: "1.0 MiB/s" +// unit=SizeB1000(0), format="%.1f" output: "1.0MB/s" +// unit=SizeB1000(0), format="% .1f" output: "1.0 MB/s" +func MovingAverageSpeed(unit interface{}, format string, average ewma.MovingAverage, wcc ...WC) Decorator { d := &movingAverageSpeed{ WC: initWC(wcc...), average: average, @@ -106,14 +103,14 @@ func (d *movingAverageSpeed) EwmaUpdate(n int64, dur time.Duration) { // AverageSpeed decorator with dynamic unit measure adjustment. It's // a wrapper of NewAverageSpeed. -func AverageSpeed(unit int, format string, wcc ...WC) Decorator { +func AverageSpeed(unit interface{}, format string, wcc ...WC) Decorator { return NewAverageSpeed(unit, format, time.Now(), wcc...) } // NewAverageSpeed decorator with dynamic unit measure adjustment and // user provided start time. 
// -// `unit` one of [0|UnitKiB|UnitKB] zero for no unit +// `unit` one of [0|SizeB1024(0)|SizeB1000(0)] // // `format` printf compatible verb for value, like "%f" or "%d" // @@ -123,14 +120,11 @@ func AverageSpeed(unit int, format string, wcc ...WC) Decorator { // // format examples: // -// unit=UnitKiB, format="%.1f" output: "1.0MiB/s" -// unit=UnitKiB, format="% .1f" output: "1.0 MiB/s" -// unit=UnitKB, format="%.1f" output: "1.0MB/s" -// unit=UnitKB, format="% .1f" output: "1.0 MB/s" -func NewAverageSpeed(unit int, format string, startTime time.Time, wcc ...WC) Decorator { - if format == "" { - format = "%.0f" - } +// unit=SizeB1024(0), format="%.1f" output: "1.0MiB/s" +// unit=SizeB1024(0), format="% .1f" output: "1.0 MiB/s" +// unit=SizeB1000(0), format="%.1f" output: "1.0MB/s" +// unit=SizeB1000(0), format="% .1f" output: "1.0 MB/s" +func NewAverageSpeed(unit interface{}, format string, startTime time.Time, wcc ...WC) Decorator { d := &averageSpeed{ WC: initWC(wcc...), startTime: startTime, @@ -151,7 +145,6 @@ func (d *averageSpeed) Decor(s Statistics) string { speed := float64(s.Current) / float64(time.Since(d.startTime)) d.msg = d.producer(speed * 1e9) } - return d.FormatMsg(d.msg) } @@ -159,17 +152,26 @@ func (d *averageSpeed) AverageAdjust(startTime time.Time) { d.startTime = startTime } -func chooseSpeedProducer(unit int, format string) func(float64) string { - switch unit { - case UnitKiB: +func chooseSpeedProducer(unit interface{}, format string) func(float64) string { + switch unit.(type) { + case SizeB1024: + if format == "" { + format = "% d" + } return func(speed float64) string { return fmt.Sprintf(format, FmtAsSpeed(SizeB1024(math.Round(speed)))) } - case UnitKB: + case SizeB1000: + if format == "" { + format = "% d" + } return func(speed float64) string { return fmt.Sprintf(format, FmtAsSpeed(SizeB1000(math.Round(speed)))) } default: + if format == "" { + format = "%f" + } return func(speed float64) string { return fmt.Sprintf(format, speed) } diff --git a/vendor/github.com/vbauerster/mpb/v8/heap_manager.go b/vendor/github.com/vbauerster/mpb/v8/heap_manager.go index 678dd7c9f..1b2364f77 100644 --- a/vendor/github.com/vbauerster/mpb/v8/heap_manager.go +++ b/vendor/github.com/vbauerster/mpb/v8/heap_manager.go @@ -46,6 +46,7 @@ func (m heapManager) run() { var sync bool for req := range m { + next: switch req.cmd { case h_push: data := req.data.(pushData) @@ -78,7 +79,8 @@ func (m heapManager) run() { select { case data.iter <- b: case <-data.drop: - break + close(data.iter) + break next } } close(data.iter) @@ -88,7 +90,8 @@ func (m heapManager) run() { select { case data.iter <- heap.Pop(&bHeap).(*Bar): case <-data.drop: - break + close(data.iter) + break next } } close(data.iter) diff --git a/vendor/github.com/vbauerster/mpb/v8/progress.go b/vendor/github.com/vbauerster/mpb/v8/progress.go index 9bb557ee4..cc4e3e102 100644 --- a/vendor/github.com/vbauerster/mpb/v8/progress.go +++ b/vendor/github.com/vbauerster/mpb/v8/progress.go @@ -19,7 +19,7 @@ const ( ) // DoneError represents an error when `*mpb.Progress` is done but its functionality is requested. -var DoneError = fmt.Errorf("%T instance can't be reused after it's done!", (*Progress)(nil)) +var DoneError = fmt.Errorf("%T instance can't be reused after it's done", (*Progress)(nil)) // Progress represents a container that renders one or more progress bars. 
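The heap_manager hunk above labels the switch so that `break next` escapes the switch, and with it the for loop draining the heap inside it, once a receiver signals drop; an unlabeled break there would only leave the select, and data.iter is now also closed on that path. A toy illustration of the labeled-break semantics, independent of mpb:

package main

import "fmt"

func main() {
	ch := make(chan int, 2)
	ch <- 1
	ch <- 2
	close(ch)

	drop := make(chan struct{})
	close(drop) // pretend the consumer gave up immediately

next:
	switch {
	case true:
		for v := range ch {
			select {
			case <-drop:
				fmt.Println("dropped before sending", v)
				break next // terminates the labeled switch, not just the select
			default:
				fmt.Println("sent", v)
			}
		}
	}
	fmt.Println("after switch")
}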
type Progress struct { @@ -351,7 +351,7 @@ func (s *pState) render(cw *cwriter.Writer) (err error) { } func (s *pState) flush(cw *cwriter.Writer, height int) error { - wg := new(sync.WaitGroup) + var wg sync.WaitGroup defer wg.Wait() // waiting for all s.hm.push to complete var popCount int diff --git a/vendor/golang.org/x/exp/slices/slices.go b/vendor/golang.org/x/exp/slices/slices.go index cff0cd49e..2540bd682 100644 --- a/vendor/golang.org/x/exp/slices/slices.go +++ b/vendor/golang.org/x/exp/slices/slices.go @@ -104,8 +104,8 @@ func CompareFunc[E1, E2 any](s1 []E1, s2 []E2, cmp func(E1, E2) int) int { // Index returns the index of the first occurrence of v in s, // or -1 if not present. func Index[E comparable](s []E, v E) int { - for i, vs := range s { - if v == vs { + for i := range s { + if v == s[i] { return i } } @@ -115,8 +115,8 @@ func Index[E comparable](s []E, v E) int { // IndexFunc returns the first index i satisfying f(s[i]), // or -1 if none do. func IndexFunc[E any](s []E, f func(E) bool) int { - for i, v := range s { - if f(v) { + for i := range s { + if f(s[i]) { return i } } @@ -207,12 +207,12 @@ func Compact[S ~[]E, E comparable](s S) S { return s } i := 1 - last := s[0] - for _, v := range s[1:] { - if v != last { - s[i] = v + for k := 1; k < len(s); k++ { + if s[k] != s[k-1] { + if i != k { + s[i] = s[k] + } i++ - last = v } } return s[:i] @@ -224,12 +224,12 @@ func CompactFunc[S ~[]E, E any](s S, eq func(E, E) bool) S { return s } i := 1 - last := s[0] - for _, v := range s[1:] { - if !eq(v, last) { - s[i] = v + for k := 1; k < len(s); k++ { + if !eq(s[k], s[k-1]) { + if i != k { + s[i] = s[k] + } i++ - last = v } } return s[:i] diff --git a/vendor/golang.org/x/exp/slices/sort.go b/vendor/golang.org/x/exp/slices/sort.go index f14f40da7..231b6448a 100644 --- a/vendor/golang.org/x/exp/slices/sort.go +++ b/vendor/golang.org/x/exp/slices/sort.go @@ -81,10 +81,12 @@ func BinarySearch[E constraints.Ordered](x []E, target E) (int, bool) { } // BinarySearchFunc works like BinarySearch, but uses a custom comparison -// function. The slice must be sorted in increasing order, where "increasing" is -// defined by cmp. cmp(a, b) is expected to return an integer comparing the two -// parameters: 0 if a == b, a negative number if a < b and a positive number if -// a > b. +// function. The slice must be sorted in increasing order, where "increasing" +// is defined by cmp. cmp should return 0 if the slice element matches +// the target, a negative number if the slice element precedes the target, +// or a positive number if the slice element follows the target. +// cmp must implement the same ordering as the slice, such that if +// cmp(a, t) < 0 and cmp(b, t) >= 0, then a must precede b in the slice. func BinarySearchFunc[E, T any](x []E, target T, cmp func(E, T) int) (int, bool) { n := len(x) // Define cmp(x[-1], target) < 0 and cmp(x[n], target) >= 0 . diff --git a/vendor/golang.org/x/net/http2/server.go b/vendor/golang.org/x/net/http2/server.go index cd057f398..033b6e6db 100644 --- a/vendor/golang.org/x/net/http2/server.go +++ b/vendor/golang.org/x/net/http2/server.go @@ -441,7 +441,7 @@ func (s *Server) ServeConn(c net.Conn, opts *ServeConnOpts) { if s.NewWriteScheduler != nil { sc.writeSched = s.NewWriteScheduler() } else { - sc.writeSched = NewPriorityWriteScheduler(nil) + sc.writeSched = newRoundRobinWriteScheduler() } // These start at the RFC-specified defaults. 
If there is a higher @@ -2429,7 +2429,7 @@ type requestBody struct { conn *serverConn closeOnce sync.Once // for use by Close only sawEOF bool // for use by Read only - pipe *pipe // non-nil if we have a HTTP entity message body + pipe *pipe // non-nil if we have an HTTP entity message body needsContinue bool // need to send a 100-continue } @@ -2569,7 +2569,8 @@ func (rws *responseWriterState) writeChunk(p []byte) (n int, err error) { clen = "" } } - if clen == "" && rws.handlerDone && bodyAllowedForStatus(rws.status) && (len(p) > 0 || !isHeadResp) { + _, hasContentLength := rws.snapHeader["Content-Length"] + if !hasContentLength && clen == "" && rws.handlerDone && bodyAllowedForStatus(rws.status) && (len(p) > 0 || !isHeadResp) { clen = strconv.Itoa(len(p)) } _, hasContentType := rws.snapHeader["Content-Type"] @@ -2774,7 +2775,7 @@ func (w *responseWriter) FlushError() error { err = rws.bw.Flush() } else { // The bufio.Writer won't call chunkWriter.Write - // (writeChunk with zero bytes, so we have to do it + // (writeChunk with zero bytes), so we have to do it // ourselves to force the HTTP response header and/or // final DATA frame (with END_STREAM) to be sent. _, err = chunkWriter{rws}.Write(nil) diff --git a/vendor/golang.org/x/net/http2/transport.go b/vendor/golang.org/x/net/http2/transport.go index ac90a2631..4f08ccba9 100644 --- a/vendor/golang.org/x/net/http2/transport.go +++ b/vendor/golang.org/x/net/http2/transport.go @@ -1268,8 +1268,8 @@ func (cc *ClientConn) RoundTrip(req *http.Request) (*http.Response, error) { cancelRequest := func(cs *clientStream, err error) error { cs.cc.mu.Lock() - defer cs.cc.mu.Unlock() cs.abortStreamLocked(err) + bodyClosed := cs.reqBodyClosed if cs.ID != 0 { // This request may have failed because of a problem with the connection, // or for some unrelated reason. (For example, the user might have canceled @@ -1284,6 +1284,23 @@ func (cc *ClientConn) RoundTrip(req *http.Request) (*http.Response, error) { // will not help. cs.cc.doNotReuse = true } + cs.cc.mu.Unlock() + // Wait for the request body to be closed. + // + // If nothing closed the body before now, abortStreamLocked + // will have started a goroutine to close it. + // + // Closing the body before returning avoids a race condition + // with net/http checking its readTrackingBody to see if the + // body was read from or closed. See golang/go#60041. + // + // The body is closed in a separate goroutine without the + // connection mutex held, but dropping the mutex before waiting + // will keep us from holding it indefinitely if the body + // close is slow for some reason. + if bodyClosed != nil { + <-bodyClosed + } return err } @@ -1899,7 +1916,7 @@ func (cc *ClientConn) encodeHeaders(req *http.Request, addGzipHeader bool, trail // 8.1.2.3 Request Pseudo-Header Fields // The :path pseudo-header field includes the path and query parts of the // target URI (the path-absolute production and optionally a '?' character - // followed by the query production (see Sections 3.3 and 3.4 of + // followed by the query production, see Sections 3.3 and 3.4 of // [RFC3986]). f(":authority", host) m := req.Method diff --git a/vendor/golang.org/x/net/http2/writesched.go b/vendor/golang.org/x/net/http2/writesched.go index c7cd00173..cc893adc2 100644 --- a/vendor/golang.org/x/net/http2/writesched.go +++ b/vendor/golang.org/x/net/http2/writesched.go @@ -184,7 +184,8 @@ func (wr *FrameWriteRequest) replyToWriter(err error) { // writeQueue is used by implementations of WriteScheduler. 
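The slices.Compact/CompactFunc rewrite above indexes the slice directly instead of tracking a `last` copy and skips self-assignments, with unchanged results; BinarySearchFunc only gains a sharper doc comment. A quick behavioral check against x/exp/slices:

package main

import (
	"fmt"
	"strings"

	"golang.org/x/exp/slices"
)

func main() {
	fmt.Println(slices.Compact([]int{1, 1, 2, 3, 3, 3})) // [1 2 3]

	words := []string{"Go", "go", "GO", "rust"}
	fmt.Println(slices.CompactFunc(words, strings.EqualFold)) // [Go rust]

	// cmp compares a slice element against the target, as the new doc says.
	i, ok := slices.BinarySearchFunc([]int{1, 3, 5}, 4, func(e, t int) int { return e - t })
	fmt.Println(i, ok) // 2 false (insertion point for 4)
}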
type writeQueue struct { - s []FrameWriteRequest + s []FrameWriteRequest + prev, next *writeQueue } func (q *writeQueue) empty() bool { return len(q.s) == 0 } diff --git a/vendor/golang.org/x/net/http2/writesched_roundrobin.go b/vendor/golang.org/x/net/http2/writesched_roundrobin.go new file mode 100644 index 000000000..54fe86322 --- /dev/null +++ b/vendor/golang.org/x/net/http2/writesched_roundrobin.go @@ -0,0 +1,119 @@ +// Copyright 2023 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package http2 + +import ( + "fmt" + "math" +) + +type roundRobinWriteScheduler struct { + // control contains control frames (SETTINGS, PING, etc.). + control writeQueue + + // streams maps stream ID to a queue. + streams map[uint32]*writeQueue + + // stream queues are stored in a circular linked list. + // head is the next stream to write, or nil if there are no streams open. + head *writeQueue + + // pool of empty queues for reuse. + queuePool writeQueuePool +} + +// newRoundRobinWriteScheduler constructs a new write scheduler. +// The round robin scheduler prioritizes control frames +// like SETTINGS and PING over DATA frames. +// When there are no control frames to send, it performs a round-robin +// selection from the ready streams. +func newRoundRobinWriteScheduler() WriteScheduler { + ws := &roundRobinWriteScheduler{ + streams: make(map[uint32]*writeQueue), + } + return ws +} + +func (ws *roundRobinWriteScheduler) OpenStream(streamID uint32, options OpenStreamOptions) { + if ws.streams[streamID] != nil { + panic(fmt.Errorf("stream %d already opened", streamID)) + } + q := ws.queuePool.get() + ws.streams[streamID] = q + if ws.head == nil { + ws.head = q + q.next = q + q.prev = q + } else { + // Queues are stored in a ring. + // Insert the new stream before ws.head, putting it at the end of the list. + q.prev = ws.head.prev + q.next = ws.head + q.prev.next = q + q.next.prev = q + } +} + +func (ws *roundRobinWriteScheduler) CloseStream(streamID uint32) { + q := ws.streams[streamID] + if q == nil { + return + } + if q.next == q { + // This was the only open stream. + ws.head = nil + } else { + q.prev.next = q.next + q.next.prev = q.prev + if ws.head == q { + ws.head = q.next + } + } + delete(ws.streams, streamID) + ws.queuePool.put(q) +} + +func (ws *roundRobinWriteScheduler) AdjustStream(streamID uint32, priority PriorityParam) {} + +func (ws *roundRobinWriteScheduler) Push(wr FrameWriteRequest) { + if wr.isControl() { + ws.control.push(wr) + return + } + q := ws.streams[wr.StreamID()] + if q == nil { + // This is a closed stream. + // wr should not be a HEADERS or DATA frame. + // We push the request onto the control queue. + if wr.DataSize() > 0 { + panic("add DATA on non-open stream") + } + ws.control.push(wr) + return + } + q.push(wr) +} + +func (ws *roundRobinWriteScheduler) Pop() (FrameWriteRequest, bool) { + // Control and RST_STREAM frames first. 
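OpenStream and CloseStream above keep the per-stream queues in a doubly linked ring through the new prev/next fields on writeQueue, so Pop can rotate fairly over open streams. The same insert-before-head and unlink logic in miniature, with a bare struct standing in for writeQueue:

package main

import "fmt"

type node struct {
	id         int
	prev, next *node
}

// insert puts q just before head, i.e. at the end of the ring.
func insert(head, q *node) *node {
	if head == nil {
		q.prev, q.next = q, q
		return q
	}
	q.prev, q.next = head.prev, head
	q.prev.next, q.next.prev = q, q
	return head
}

// remove unlinks q and returns the (possibly new) head.
func remove(head, q *node) *node {
	if q.next == q { // q was the only element
		return nil
	}
	q.prev.next, q.next.prev = q.next, q.prev
	if head == q {
		head = q.next
	}
	return head
}

func main() {
	var head *node
	for i := 1; i <= 3; i++ {
		head = insert(head, &node{id: i})
	}
	head = remove(head, head) // close stream 1; head advances to 2
	for q, n := head, 0; n < 4; q, n = q.next, n+1 {
		fmt.Print(q.id, " ") // 2 3 2 3 (round-robin over the ring)
	}
	fmt.Println()
}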
+ if !ws.control.empty() { + return ws.control.shift(), true + } + if ws.head == nil { + return FrameWriteRequest{}, false + } + q := ws.head + for { + if wr, ok := q.consume(math.MaxInt32); ok { + ws.head = q.next + return wr, true + } + q = q.next + if q == ws.head { + break + } + } + return FrameWriteRequest{}, false +} diff --git a/vendor/golang.org/x/sync/errgroup/errgroup.go b/vendor/golang.org/x/sync/errgroup/errgroup.go index cbee7a4e2..b18efb743 100644 --- a/vendor/golang.org/x/sync/errgroup/errgroup.go +++ b/vendor/golang.org/x/sync/errgroup/errgroup.go @@ -20,7 +20,7 @@ type token struct{} // A zero Group is valid, has no limit on the number of active goroutines, // and does not cancel on error. type Group struct { - cancel func() + cancel func(error) wg sync.WaitGroup @@ -43,7 +43,7 @@ func (g *Group) done() { // returns a non-nil error or the first time Wait returns, whichever occurs // first. func WithContext(ctx context.Context) (*Group, context.Context) { - ctx, cancel := context.WithCancel(ctx) + ctx, cancel := withCancelCause(ctx) return &Group{cancel: cancel}, ctx } @@ -52,7 +52,7 @@ func WithContext(ctx context.Context) (*Group, context.Context) { func (g *Group) Wait() error { g.wg.Wait() if g.cancel != nil { - g.cancel() + g.cancel(g.err) } return g.err } @@ -76,7 +76,7 @@ func (g *Group) Go(f func() error) { g.errOnce.Do(func() { g.err = err if g.cancel != nil { - g.cancel() + g.cancel(g.err) } }) } @@ -105,7 +105,7 @@ func (g *Group) TryGo(f func() error) bool { g.errOnce.Do(func() { g.err = err if g.cancel != nil { - g.cancel() + g.cancel(g.err) } }) } diff --git a/vendor/golang.org/x/sync/errgroup/go120.go b/vendor/golang.org/x/sync/errgroup/go120.go new file mode 100644 index 000000000..7d419d376 --- /dev/null +++ b/vendor/golang.org/x/sync/errgroup/go120.go @@ -0,0 +1,14 @@ +// Copyright 2023 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build go1.20 +// +build go1.20 + +package errgroup + +import "context" + +func withCancelCause(parent context.Context) (context.Context, func(error)) { + return context.WithCancelCause(parent) +} diff --git a/vendor/golang.org/x/sync/errgroup/pre_go120.go b/vendor/golang.org/x/sync/errgroup/pre_go120.go new file mode 100644 index 000000000..1795c18ac --- /dev/null +++ b/vendor/golang.org/x/sync/errgroup/pre_go120.go @@ -0,0 +1,15 @@ +// Copyright 2023 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build !go1.20 +// +build !go1.20 + +package errgroup + +import "context" + +func withCancelCause(parent context.Context) (context.Context, func(error)) { + ctx, cancel := context.WithCancel(parent) + return ctx, func(error) { cancel() } +} diff --git a/vendor/golang.org/x/tools/go/gcexportdata/gcexportdata.go b/vendor/golang.org/x/tools/go/gcexportdata/gcexportdata.go index 165ede0f8..03543bd4b 100644 --- a/vendor/golang.org/x/tools/go/gcexportdata/gcexportdata.go +++ b/vendor/golang.org/x/tools/go/gcexportdata/gcexportdata.go @@ -128,15 +128,14 @@ func Read(in io.Reader, fset *token.FileSet, imports map[string]*types.Package, // (from "version"). Select appropriate importer. 
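With Group.cancel now a func(error) obtained from withCancelCause, the context returned by errgroup.WithContext records the first failure as its cancellation cause on Go 1.20+ (the pre_go120 shim simply discards the argument). A small check; context.Cause requires Go 1.20:

package main

import (
	"context"
	"errors"
	"fmt"

	"golang.org/x/sync/errgroup"
)

func main() {
	g, ctx := errgroup.WithContext(context.Background())
	errBoom := errors.New("boom")

	g.Go(func() error { return errBoom })
	g.Go(func() error {
		<-ctx.Done() // cancelled by the first goroutine's failure
		return ctx.Err()
	})

	fmt.Println(g.Wait())           // boom
	fmt.Println(context.Cause(ctx)) // boom (previously just "context canceled")
}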
if len(data) > 0 { switch data[0] { - case 'i': - _, pkg, err := gcimporter.IImportData(fset, imports, data[1:], path) - return pkg, err + case 'v', 'c', 'd': // binary, till go1.10 + return nil, fmt.Errorf("binary (%c) import format is no longer supported", data[0]) - case 'v', 'c', 'd': - _, pkg, err := gcimporter.BImportData(fset, imports, data, path) + case 'i': // indexed, till go1.19 + _, pkg, err := gcimporter.IImportData(fset, imports, data[1:], path) return pkg, err - case 'u': + case 'u': // unified, from go1.20 _, pkg, err := gcimporter.UImportData(fset, imports, data[1:], path) return pkg, err diff --git a/vendor/golang.org/x/tools/go/packages/golist.go b/vendor/golang.org/x/tools/go/packages/golist.go index 6bb7168d2..e84f19dfa 100644 --- a/vendor/golang.org/x/tools/go/packages/golist.go +++ b/vendor/golang.org/x/tools/go/packages/golist.go @@ -625,7 +625,12 @@ func (state *golistState) createDriverResponse(words ...string) (*driverResponse } if pkg.PkgPath == "unsafe" { - pkg.GoFiles = nil // ignore fake unsafe.go file + pkg.CompiledGoFiles = nil // ignore fake unsafe.go file (#59929) + } else if len(pkg.CompiledGoFiles) == 0 { + // Work around for pre-go.1.11 versions of go list. + // TODO(matloob): they should be handled by the fallback. + // Can we delete this? + pkg.CompiledGoFiles = pkg.GoFiles } // Assume go list emits only absolute paths for Dir. @@ -663,13 +668,6 @@ func (state *golistState) createDriverResponse(words ...string) (*driverResponse response.Roots = append(response.Roots, pkg.ID) } - // Work around for pre-go.1.11 versions of go list. - // TODO(matloob): they should be handled by the fallback. - // Can we delete this? - if len(pkg.CompiledGoFiles) == 0 { - pkg.CompiledGoFiles = pkg.GoFiles - } - // Temporary work-around for golang/go#39986. Parse filenames out of // error messages. This happens if there are unrecoverable syntax // errors in the source, so we can't match on a specific error message. @@ -891,6 +889,15 @@ func golistargs(cfg *Config, words []string, goVersion int) []string { // probably because you'd just get the TestMain. fmt.Sprintf("-find=%t", !cfg.Tests && cfg.Mode&findFlags == 0 && !usesExportData(cfg)), } + + // golang/go#60456: with go1.21 and later, go list serves pgo variants, which + // can be costly to compute and may result in redundant processing for the + // caller. Disable these variants. If someone wants to add e.g. a NeedPGO + // mode flag, that should be a separate proposal. + if goVersion >= 21 { + fullargs = append(fullargs, "-pgo=off") + } + fullargs = append(fullargs, cfg.BuildFlags...) fullargs = append(fullargs, "--") fullargs = append(fullargs, words...) diff --git a/vendor/golang.org/x/tools/go/packages/packages.go b/vendor/golang.org/x/tools/go/packages/packages.go index 0f1505b80..632be722a 100644 --- a/vendor/golang.org/x/tools/go/packages/packages.go +++ b/vendor/golang.org/x/tools/go/packages/packages.go @@ -308,6 +308,9 @@ type Package struct { TypeErrors []types.Error // GoFiles lists the absolute file paths of the package's Go source files. + // It may include files that should not be compiled, for example because + // they contain non-matching build tags, are documentary pseudo-files such as + // unsafe/unsafe.go or builtin/builtin.go, or are subject to cgo preprocessing. 
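The golist changes above clear CompiledGoFiles rather than GoFiles for package unsafe and pass -pgo=off to go list on go1.21+; the GoFiles doc addition below spells out why the two lists can differ. A sketch of observing that difference with go/packages; the exact file counts depend on the local toolchain and on whether cgo is enabled:

package main

import (
	"fmt"
	"log"

	"golang.org/x/tools/go/packages"
)

func main() {
	cfg := &packages.Config{
		Mode: packages.NeedName | packages.NeedFiles | packages.NeedCompiledGoFiles,
	}
	pkgs, err := packages.Load(cfg, "unsafe", "net")
	if err != nil {
		log.Fatal(err)
	}
	for _, p := range pkgs {
		// "unsafe" keeps its documentary unsafe.go in GoFiles but now has no
		// CompiledGoFiles; cgo-using packages such as "net" may list generated
		// variants in CompiledGoFiles instead of the originals.
		fmt.Printf("%s: %d GoFiles, %d CompiledGoFiles\n",
			p.PkgPath, len(p.GoFiles), len(p.CompiledGoFiles))
	}
}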
GoFiles []string // CompiledGoFiles lists the absolute file paths of the package's source diff --git a/vendor/golang.org/x/tools/go/types/objectpath/objectpath.go b/vendor/golang.org/x/tools/go/types/objectpath/objectpath.go deleted file mode 100644 index be8f5a867..000000000 --- a/vendor/golang.org/x/tools/go/types/objectpath/objectpath.go +++ /dev/null @@ -1,762 +0,0 @@ -// Copyright 2018 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Package objectpath defines a naming scheme for types.Objects -// (that is, named entities in Go programs) relative to their enclosing -// package. -// -// Type-checker objects are canonical, so they are usually identified by -// their address in memory (a pointer), but a pointer has meaning only -// within one address space. By contrast, objectpath names allow the -// identity of an object to be sent from one program to another, -// establishing a correspondence between types.Object variables that are -// distinct but logically equivalent. -// -// A single object may have multiple paths. In this example, -// -// type A struct{ X int } -// type B A -// -// the field X has two paths due to its membership of both A and B. -// The For(obj) function always returns one of these paths, arbitrarily -// but consistently. -package objectpath - -import ( - "fmt" - "go/types" - "sort" - "strconv" - "strings" - - "golang.org/x/tools/internal/typeparams" - - _ "unsafe" // for go:linkname -) - -// A Path is an opaque name that identifies a types.Object -// relative to its package. Conceptually, the name consists of a -// sequence of destructuring operations applied to the package scope -// to obtain the original object. -// The name does not include the package itself. -type Path string - -// Encoding -// -// An object path is a textual and (with training) human-readable encoding -// of a sequence of destructuring operators, starting from a types.Package. -// The sequences represent a path through the package/object/type graph. -// We classify these operators by their type: -// -// PO package->object Package.Scope.Lookup -// OT object->type Object.Type -// TT type->type Type.{Elem,Key,Params,Results,Underlying} [EKPRU] -// TO type->object Type.{At,Field,Method,Obj} [AFMO] -// -// All valid paths start with a package and end at an object -// and thus may be defined by the regular language: -// -// objectpath = PO (OT TT* TO)* -// -// The concrete encoding follows directly: -// - The only PO operator is Package.Scope.Lookup, which requires an identifier. -// - The only OT operator is Object.Type, -// which we encode as '.' because dot cannot appear in an identifier. -// - The TT operators are encoded as [EKPRUTC]; -// one of these (TypeParam) requires an integer operand, -// which is encoded as a string of decimal digits. -// - The TO operators are encoded as [AFMO]; -// three of these (At,Field,Method) require an integer operand, -// which is encoded as a string of decimal digits. -// These indices are stable across different representations -// of the same package, even source and export data. -// The indices used are implementation specific and may not correspond to -// the argument to the go/types function. 
-// -// In the example below, -// -// package p -// -// type T interface { -// f() (a string, b struct{ X int }) -// } -// -// field X has the path "T.UM0.RA1.F0", -// representing the following sequence of operations: -// -// p.Lookup("T") T -// .Type().Underlying().Method(0). f -// .Type().Results().At(1) b -// .Type().Field(0) X -// -// The encoding is not maximally compact---every R or P is -// followed by an A, for example---but this simplifies the -// encoder and decoder. -const ( - // object->type operators - opType = '.' // .Type() (Object) - - // type->type operators - opElem = 'E' // .Elem() (Pointer, Slice, Array, Chan, Map) - opKey = 'K' // .Key() (Map) - opParams = 'P' // .Params() (Signature) - opResults = 'R' // .Results() (Signature) - opUnderlying = 'U' // .Underlying() (Named) - opTypeParam = 'T' // .TypeParams.At(i) (Named, Signature) - opConstraint = 'C' // .Constraint() (TypeParam) - - // type->object operators - opAt = 'A' // .At(i) (Tuple) - opField = 'F' // .Field(i) (Struct) - opMethod = 'M' // .Method(i) (Named or Interface; not Struct: "promoted" names are ignored) - opObj = 'O' // .Obj() (Named, TypeParam) -) - -// For returns the path to an object relative to its package, -// or an error if the object is not accessible from the package's Scope. -// -// The For function guarantees to return a path only for the following objects: -// - package-level types -// - exported package-level non-types -// - methods -// - parameter and result variables -// - struct fields -// These objects are sufficient to define the API of their package. -// The objects described by a package's export data are drawn from this set. -// -// For does not return a path for predeclared names, imported package -// names, local names, and unexported package-level names (except -// types). -// -// Example: given this definition, -// -// package p -// -// type T interface { -// f() (a string, b struct{ X int }) -// } -// -// For(X) would return a path that denotes the following sequence of operations: -// -// p.Scope().Lookup("T") (TypeName T) -// .Type().Underlying().Method(0). (method Func f) -// .Type().Results().At(1) (field Var b) -// .Type().Field(0) (field Var X) -// -// where p is the package (*types.Package) to which X belongs. -func For(obj types.Object) (Path, error) { - return newEncoderFor()(obj) -} - -// An encoder amortizes the cost of encoding the paths of multiple objects. -// Nonexported pending approval of proposal 58668. -type encoder struct { - scopeNamesMemo map[*types.Scope][]string // memoization of Scope.Names() - namedMethodsMemo map[*types.Named][]*types.Func // memoization of namedMethods() -} - -// Exposed to gopls via golang.org/x/tools/internal/typesinternal -// pending approval of proposal 58668. -// -//go:linkname newEncoderFor -func newEncoderFor() func(types.Object) (Path, error) { return new(encoder).For } - -func (enc *encoder) For(obj types.Object) (Path, error) { - pkg := obj.Pkg() - - // This table lists the cases of interest. - // - // Object Action - // ------ ------ - // nil reject - // builtin reject - // pkgname reject - // label reject - // var - // package-level accept - // func param/result accept - // local reject - // struct field accept - // const - // package-level accept - // local reject - // func - // package-level accept - // init functions reject - // concrete method accept - // interface method accept - // type - // package-level accept - // local reject - // - // The only accessible package-level objects are members of pkg itself. 
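Although this vendored copy of objectpath is deleted, the package still lives upstream in golang.org/x/tools. A sketch reproducing the "T.UM0.RA1.F0" walk from the doc comment above, type-checking the documented example source and round-tripping the path (assumes the upstream package and a standard go/types setup):

package main

import (
	"fmt"
	"go/ast"
	"go/importer"
	"go/parser"
	"go/token"
	"go/types"

	"golang.org/x/tools/go/types/objectpath"
)

const src = `package p

type T interface {
	f() (a string, b struct{ X int })
}
`

func main() {
	fset := token.NewFileSet()
	file, err := parser.ParseFile(fset, "p.go", src, 0)
	if err != nil {
		panic(err)
	}
	pkg, err := (&types.Config{Importer: importer.Default()}).Check("p", fset, []*ast.File{file}, nil)
	if err != nil {
		panic(err)
	}

	// p.Lookup("T") .Type().Underlying().Method(0) .Type().Results().At(1) .Type().Field(0)
	T := pkg.Scope().Lookup("T").Type()
	f := T.Underlying().(*types.Interface).Method(0)
	b := f.Type().(*types.Signature).Results().At(1)
	X := b.Type().(*types.Struct).Field(0)

	path, err := objectpath.For(X)
	fmt.Println(path, err) // T.UM0.RA1.F0 <nil>

	obj, err := objectpath.Object(pkg, path)
	fmt.Println(obj == types.Object(X), err) // true <nil>
}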
- // - // The cases are handled in four steps: - // - // 1. reject nil and builtin - // 2. accept package-level objects - // 3. reject obviously invalid objects - // 4. search the API for the path to the param/result/field/method. - - // 1. reference to nil or builtin? - if pkg == nil { - return "", fmt.Errorf("predeclared %s has no path", obj) - } - scope := pkg.Scope() - - // 2. package-level object? - if scope.Lookup(obj.Name()) == obj { - // Only exported objects (and non-exported types) have a path. - // Non-exported types may be referenced by other objects. - if _, ok := obj.(*types.TypeName); !ok && !obj.Exported() { - return "", fmt.Errorf("no path for non-exported %v", obj) - } - return Path(obj.Name()), nil - } - - // 3. Not a package-level object. - // Reject obviously non-viable cases. - switch obj := obj.(type) { - case *types.TypeName: - if _, ok := obj.Type().(*typeparams.TypeParam); !ok { - // With the exception of type parameters, only package-level type names - // have a path. - return "", fmt.Errorf("no path for %v", obj) - } - case *types.Const, // Only package-level constants have a path. - *types.Label, // Labels are function-local. - *types.PkgName: // PkgNames are file-local. - return "", fmt.Errorf("no path for %v", obj) - - case *types.Var: - // Could be: - // - a field (obj.IsField()) - // - a func parameter or result - // - a local var. - // Sadly there is no way to distinguish - // a param/result from a local - // so we must proceed to the find. - - case *types.Func: - // A func, if not package-level, must be a method. - if recv := obj.Type().(*types.Signature).Recv(); recv == nil { - return "", fmt.Errorf("func is not a method: %v", obj) - } - - if path, ok := enc.concreteMethod(obj); ok { - // Fast path for concrete methods that avoids looping over scope. - return path, nil - } - - default: - panic(obj) - } - - // 4. Search the API for the path to the var (field/param/result) or method. - - // First inspect package-level named types. - // In the presence of path aliases, these give - // the best paths because non-types may - // refer to types, but not the reverse. - empty := make([]byte, 0, 48) // initial space - names := enc.scopeNames(scope) - for _, name := range names { - o := scope.Lookup(name) - tname, ok := o.(*types.TypeName) - if !ok { - continue // handle non-types in second pass - } - - path := append(empty, name...) - path = append(path, opType) - - T := o.Type() - - if tname.IsAlias() { - // type alias - if r := find(obj, T, path, nil); r != nil { - return Path(r), nil - } - } else { - if named, _ := T.(*types.Named); named != nil { - if r := findTypeParam(obj, typeparams.ForNamed(named), path, nil); r != nil { - // generic named type - return Path(r), nil - } - } - // defined (named) type - if r := find(obj, T.Underlying(), append(path, opUnderlying), nil); r != nil { - return Path(r), nil - } - } - } - - // Then inspect everything else: - // non-types, and declared methods of defined types. - for _, name := range names { - o := scope.Lookup(name) - path := append(empty, name...) - if _, ok := o.(*types.TypeName); !ok { - if o.Exported() { - // exported non-type (const, var, func) - if r := find(obj, o.Type(), append(path, opType), nil); r != nil { - return Path(r), nil - } - } - continue - } - - // Inspect declared methods of defined types. 
- if T, ok := o.Type().(*types.Named); ok { - path = append(path, opType) - // Note that method index here is always with respect - // to canonical ordering of methods, regardless of how - // they appear in the underlying type. - for i, m := range enc.namedMethods(T) { - path2 := appendOpArg(path, opMethod, i) - if m == obj { - return Path(path2), nil // found declared method - } - if r := find(obj, m.Type(), append(path2, opType), nil); r != nil { - return Path(r), nil - } - } - } - } - - return "", fmt.Errorf("can't find path for %v in %s", obj, pkg.Path()) -} - -func appendOpArg(path []byte, op byte, arg int) []byte { - path = append(path, op) - path = strconv.AppendInt(path, int64(arg), 10) - return path -} - -// concreteMethod returns the path for meth, which must have a non-nil receiver. -// The second return value indicates success and may be false if the method is -// an interface method or if it is an instantiated method. -// -// This function is just an optimization that avoids the general scope walking -// approach. You are expected to fall back to the general approach if this -// function fails. -func (enc *encoder) concreteMethod(meth *types.Func) (Path, bool) { - // Concrete methods can only be declared on package-scoped named types. For - // that reason we can skip the expensive walk over the package scope: the - // path will always be package -> named type -> method. We can trivially get - // the type name from the receiver, and only have to look over the type's - // methods to find the method index. - // - // Methods on generic types require special consideration, however. Consider - // the following package: - // - // L1: type S[T any] struct{} - // L2: func (recv S[A]) Foo() { recv.Bar() } - // L3: func (recv S[B]) Bar() { } - // L4: type Alias = S[int] - // L5: func _[T any]() { var s S[int]; s.Foo() } - // - // The receivers of methods on generic types are instantiations. L2 and L3 - // instantiate S with the type-parameters A and B, which are scoped to the - // respective methods. L4 and L5 each instantiate S with int. Each of these - // instantiations has its own method set, full of methods (and thus objects) - // with receivers whose types are the respective instantiations. In other - // words, we have - // - // S[A].Foo, S[A].Bar - // S[B].Foo, S[B].Bar - // S[int].Foo, S[int].Bar - // - // We may thus be trying to produce object paths for any of these objects. - // - // S[A].Foo and S[B].Bar are the origin methods, and their paths are S.Foo - // and S.Bar, which are the paths that this function naturally produces. - // - // S[A].Bar, S[B].Foo, and both methods on S[int] are instantiations that - // don't correspond to the origin methods. For S[int], this is significant. - // The most precise object path for S[int].Foo, for example, is Alias.Foo, - // not S.Foo. Our function, however, would produce S.Foo, which would - // resolve to a different object. - // - // For S[A].Bar and S[B].Foo it could be argued that S.Bar and S.Foo are - // still the correct paths, since only the origin methods have meaningful - // paths. But this is likely only true for trivial cases and has edge cases. - // Since this function is only an optimization, we err on the side of giving - // up, deferring to the slower but definitely correct algorithm. Most users - // of objectpath will only be giving us origin methods, anyway, as referring - // to instantiated methods is usually not useful. 
- - if typeparams.OriginMethod(meth) != meth { - return "", false - } - - recvT := meth.Type().(*types.Signature).Recv().Type() - if ptr, ok := recvT.(*types.Pointer); ok { - recvT = ptr.Elem() - } - - named, ok := recvT.(*types.Named) - if !ok { - return "", false - } - - if types.IsInterface(named) { - // Named interfaces don't have to be package-scoped - // - // TODO(dominikh): opt: if scope.Lookup(name) == named, then we can apply this optimization to interface - // methods, too, I think. - return "", false - } - - // Preallocate space for the name, opType, opMethod, and some digits. - name := named.Obj().Name() - path := make([]byte, 0, len(name)+8) - path = append(path, name...) - path = append(path, opType) - for i, m := range enc.namedMethods(named) { - if m == meth { - path = appendOpArg(path, opMethod, i) - return Path(path), true - } - } - - panic(fmt.Sprintf("couldn't find method %s on type %s", meth, named)) -} - -// find finds obj within type T, returning the path to it, or nil if not found. -// -// The seen map is used to short circuit cycles through type parameters. If -// nil, it will be allocated as necessary. -func find(obj types.Object, T types.Type, path []byte, seen map[*types.TypeName]bool) []byte { - switch T := T.(type) { - case *types.Basic, *types.Named: - // Named types belonging to pkg were handled already, - // so T must belong to another package. No path. - return nil - case *types.Pointer: - return find(obj, T.Elem(), append(path, opElem), seen) - case *types.Slice: - return find(obj, T.Elem(), append(path, opElem), seen) - case *types.Array: - return find(obj, T.Elem(), append(path, opElem), seen) - case *types.Chan: - return find(obj, T.Elem(), append(path, opElem), seen) - case *types.Map: - if r := find(obj, T.Key(), append(path, opKey), seen); r != nil { - return r - } - return find(obj, T.Elem(), append(path, opElem), seen) - case *types.Signature: - if r := findTypeParam(obj, typeparams.ForSignature(T), path, seen); r != nil { - return r - } - if r := find(obj, T.Params(), append(path, opParams), seen); r != nil { - return r - } - return find(obj, T.Results(), append(path, opResults), seen) - case *types.Struct: - for i := 0; i < T.NumFields(); i++ { - fld := T.Field(i) - path2 := appendOpArg(path, opField, i) - if fld == obj { - return path2 // found field var - } - if r := find(obj, fld.Type(), append(path2, opType), seen); r != nil { - return r - } - } - return nil - case *types.Tuple: - for i := 0; i < T.Len(); i++ { - v := T.At(i) - path2 := appendOpArg(path, opAt, i) - if v == obj { - return path2 // found param/result var - } - if r := find(obj, v.Type(), append(path2, opType), seen); r != nil { - return r - } - } - return nil - case *types.Interface: - for i := 0; i < T.NumMethods(); i++ { - m := T.Method(i) - path2 := appendOpArg(path, opMethod, i) - if m == obj { - return path2 // found interface method - } - if r := find(obj, m.Type(), append(path2, opType), seen); r != nil { - return r - } - } - return nil - case *typeparams.TypeParam: - name := T.Obj() - if name == obj { - return append(path, opObj) - } - if seen[name] { - return nil - } - if seen == nil { - seen = make(map[*types.TypeName]bool) - } - seen[name] = true - if r := find(obj, T.Constraint(), append(path, opConstraint), seen); r != nil { - return r - } - return nil - } - panic(T) -} - -func findTypeParam(obj types.Object, list *typeparams.TypeParamList, path []byte, seen map[*types.TypeName]bool) []byte { - for i := 0; i < list.Len(); i++ { - tparam := list.At(i) - path2 := 
appendOpArg(path, opTypeParam, i) - if r := find(obj, tparam, path2, seen); r != nil { - return r - } - } - return nil -} - -// Object returns the object denoted by path p within the package pkg. -func Object(pkg *types.Package, p Path) (types.Object, error) { - if p == "" { - return nil, fmt.Errorf("empty path") - } - - pathstr := string(p) - var pkgobj, suffix string - if dot := strings.IndexByte(pathstr, opType); dot < 0 { - pkgobj = pathstr - } else { - pkgobj = pathstr[:dot] - suffix = pathstr[dot:] // suffix starts with "." - } - - obj := pkg.Scope().Lookup(pkgobj) - if obj == nil { - return nil, fmt.Errorf("package %s does not contain %q", pkg.Path(), pkgobj) - } - - // abstraction of *types.{Pointer,Slice,Array,Chan,Map} - type hasElem interface { - Elem() types.Type - } - // abstraction of *types.{Named,Signature} - type hasTypeParams interface { - TypeParams() *typeparams.TypeParamList - } - // abstraction of *types.{Named,TypeParam} - type hasObj interface { - Obj() *types.TypeName - } - - // The loop state is the pair (t, obj), - // exactly one of which is non-nil, initially obj. - // All suffixes start with '.' (the only object->type operation), - // followed by optional type->type operations, - // then a type->object operation. - // The cycle then repeats. - var t types.Type - for suffix != "" { - code := suffix[0] - suffix = suffix[1:] - - // Codes [AFMT] have an integer operand. - var index int - switch code { - case opAt, opField, opMethod, opTypeParam: - rest := strings.TrimLeft(suffix, "0123456789") - numerals := suffix[:len(suffix)-len(rest)] - suffix = rest - i, err := strconv.Atoi(numerals) - if err != nil { - return nil, fmt.Errorf("invalid path: bad numeric operand %q for code %q", numerals, code) - } - index = int(i) - case opObj: - // no operand - default: - // The suffix must end with a type->object operation.
- if suffix == "" { - return nil, fmt.Errorf("invalid path: ends with %q, want [AFMO]", code) - } - } - - if code == opType { - if t != nil { - return nil, fmt.Errorf("invalid path: unexpected %q in type context", opType) - } - t = obj.Type() - obj = nil - continue - } - - if t == nil { - return nil, fmt.Errorf("invalid path: code %q in object context", code) - } - - // Inv: t != nil, obj == nil - - switch code { - case opElem: - hasElem, ok := t.(hasElem) // Pointer, Slice, Array, Chan, Map - if !ok { - return nil, fmt.Errorf("cannot apply %q to %s (got %T, want pointer, slice, array, chan or map)", code, t, t) - } - t = hasElem.Elem() - - case opKey: - mapType, ok := t.(*types.Map) - if !ok { - return nil, fmt.Errorf("cannot apply %q to %s (got %T, want map)", code, t, t) - } - t = mapType.Key() - - case opParams: - sig, ok := t.(*types.Signature) - if !ok { - return nil, fmt.Errorf("cannot apply %q to %s (got %T, want signature)", code, t, t) - } - t = sig.Params() - - case opResults: - sig, ok := t.(*types.Signature) - if !ok { - return nil, fmt.Errorf("cannot apply %q to %s (got %T, want signature)", code, t, t) - } - t = sig.Results() - - case opUnderlying: - named, ok := t.(*types.Named) - if !ok { - return nil, fmt.Errorf("cannot apply %q to %s (got %T, want named)", code, t, t) - } - t = named.Underlying() - - case opTypeParam: - hasTypeParams, ok := t.(hasTypeParams) // Named, Signature - if !ok { - return nil, fmt.Errorf("cannot apply %q to %s (got %T, want named or signature)", code, t, t) - } - tparams := hasTypeParams.TypeParams() - if n := tparams.Len(); index >= n { - return nil, fmt.Errorf("tuple index %d out of range [0-%d)", index, n) - } - t = tparams.At(index) - - case opConstraint: - tparam, ok := t.(*typeparams.TypeParam) - if !ok { - return nil, fmt.Errorf("cannot apply %q to %s (got %T, want type parameter)", code, t, t) - } - t = tparam.Constraint() - - case opAt: - tuple, ok := t.(*types.Tuple) - if !ok { - return nil, fmt.Errorf("cannot apply %q to %s (got %T, want tuple)", code, t, t) - } - if n := tuple.Len(); index >= n { - return nil, fmt.Errorf("tuple index %d out of range [0-%d)", index, n) - } - obj = tuple.At(index) - t = nil - - case opField: - structType, ok := t.(*types.Struct) - if !ok { - return nil, fmt.Errorf("cannot apply %q to %s (got %T, want struct)", code, t, t) - } - if n := structType.NumFields(); index >= n { - return nil, fmt.Errorf("field index %d out of range [0-%d)", index, n) - } - obj = structType.Field(index) - t = nil - - case opMethod: - switch t := t.(type) { - case *types.Interface: - if index >= t.NumMethods() { - return nil, fmt.Errorf("method index %d out of range [0-%d)", index, t.NumMethods()) - } - obj = t.Method(index) // Id-ordered - - case *types.Named: - methods := namedMethods(t) // (unmemoized) - if index >= len(methods) { - return nil, fmt.Errorf("method index %d out of range [0-%d)", index, len(methods)) - } - obj = methods[index] // Id-ordered - - default: - return nil, fmt.Errorf("cannot apply %q to %s (got %T, want interface or named)", code, t, t) - } - t = nil - - case opObj: - hasObj, ok := t.(hasObj) - if !ok { - return nil, fmt.Errorf("cannot apply %q to %s (got %T, want named or type param)", code, t, t) - } - obj = hasObj.Obj() - t = nil - - default: - return nil, fmt.Errorf("invalid path: unknown code %q", code) - } - } - - if obj.Pkg() != pkg { - return nil, fmt.Errorf("path denotes %s, which belongs to a different package", obj) - } - - return obj, nil // success -} - -// namedMethods returns the 
methods of a Named type in ascending Id order. -func namedMethods(named *types.Named) []*types.Func { - methods := make([]*types.Func, named.NumMethods()) - for i := range methods { - methods[i] = named.Method(i) - } - sort.Slice(methods, func(i, j int) bool { - return methods[i].Id() < methods[j].Id() - }) - return methods -} - -// scopeNames is a memoization of scope.Names. Callers must not modify the result. -func (enc *encoder) scopeNames(scope *types.Scope) []string { - m := enc.scopeNamesMemo - if m == nil { - m = make(map[*types.Scope][]string) - enc.scopeNamesMemo = m - } - names, ok := m[scope] - if !ok { - names = scope.Names() // allocates and sorts - m[scope] = names - } - return names -} - -// namedMethods is a memoization of the namedMethods function. Callers must not modify the result. -func (enc *encoder) namedMethods(named *types.Named) []*types.Func { - m := enc.namedMethodsMemo - if m == nil { - m = make(map[*types.Named][]*types.Func) - enc.namedMethodsMemo = m - } - methods, ok := m[named] - if !ok { - methods = namedMethods(named) // allocates and sorts - m[named] = methods - } - return methods - -} diff --git a/vendor/golang.org/x/tools/internal/event/tag/tag.go b/vendor/golang.org/x/tools/internal/event/tag/tag.go new file mode 100644 index 000000000..ff2f2ecd3 --- /dev/null +++ b/vendor/golang.org/x/tools/internal/event/tag/tag.go @@ -0,0 +1,59 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package tag provides the labels used for telemetry throughout gopls. +package tag + +import ( + "golang.org/x/tools/internal/event/keys" +) + +var ( + // create the label keys we use + Method = keys.NewString("method", "") + StatusCode = keys.NewString("status.code", "") + StatusMessage = keys.NewString("status.message", "") + RPCID = keys.NewString("id", "") + RPCDirection = keys.NewString("direction", "") + File = keys.NewString("file", "") + Directory = keys.New("directory", "") + URI = keys.New("URI", "") + Package = keys.NewString("package", "") // Package ID + PackagePath = keys.NewString("package_path", "") + Query = keys.New("query", "") + Snapshot = keys.NewUInt64("snapshot", "") + Operation = keys.NewString("operation", "") + + Position = keys.New("position", "") + Category = keys.NewString("category", "") + PackageCount = keys.NewInt("packages", "") + Files = keys.New("files", "") + Port = keys.NewInt("port", "") + Type = keys.New("type", "") + HoverKind = keys.NewString("hoverkind", "") + + NewServer = keys.NewString("new_server", "A new server was added") + EndServer = keys.NewString("end_server", "A server was shut down") + + ServerID = keys.NewString("server", "The server ID an event is related to") + Logfile = keys.NewString("logfile", "") + DebugAddress = keys.NewString("debug_address", "") + GoplsPath = keys.NewString("gopls_path", "") + ClientID = keys.NewString("client_id", "") + + Level = keys.NewInt("level", "The logging level") +) + +var ( + // create the stats we measure + Started = keys.NewInt64("started", "Count of started RPCs.") + ReceivedBytes = keys.NewInt64("received_bytes", "Bytes received.") //, unit.Bytes) + SentBytes = keys.NewInt64("sent_bytes", "Bytes sent.") //, unit.Bytes) + Latency = keys.NewFloat64("latency_ms", "Elapsed time in milliseconds") //, unit.Milliseconds) +) + +const ( + Inbound = "in" + Outbound = "out" +) diff --git a/vendor/golang.org/x/tools/internal/gcimporter/bexport.go 
b/vendor/golang.org/x/tools/internal/gcimporter/bexport.go deleted file mode 100644 index 30582ed6d..000000000 --- a/vendor/golang.org/x/tools/internal/gcimporter/bexport.go +++ /dev/null @@ -1,852 +0,0 @@ -// Copyright 2016 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Binary package export. -// This file was derived from $GOROOT/src/cmd/compile/internal/gc/bexport.go; -// see that file for specification of the format. - -package gcimporter - -import ( - "bytes" - "encoding/binary" - "fmt" - "go/constant" - "go/token" - "go/types" - "math" - "math/big" - "sort" - "strings" -) - -// If debugFormat is set, each integer and string value is preceded by a marker -// and position information in the encoding. This mechanism permits an importer -// to recognize immediately when it is out of sync. The importer recognizes this -// mode automatically (i.e., it can import export data produced with debugging -// support even if debugFormat is not set at the time of import). This mode will -// lead to massively larger export data (by a factor of 2 to 3) and should only -// be enabled during development and debugging. -// -// NOTE: This flag is the first flag to enable if importing dies because of -// (suspected) format errors, and whenever a change is made to the format. -const debugFormat = false // default: false - -// Current export format version. Increase with each format change. -// -// Note: The latest binary (non-indexed) export format is at version 6. -// This exporter is still at level 4, but it doesn't matter since -// the binary importer can handle older versions just fine. -// -// 6: package height (CL 105038) -- NOT IMPLEMENTED HERE -// 5: improved position encoding efficiency (issue 20080, CL 41619) -- NOT IMPLEMENTED HERE -// 4: type name objects support type aliases, uses aliasTag -// 3: Go1.8 encoding (same as version 2, aliasTag defined but never used) -// 2: removed unused bool in ODCL export (compiler only) -// 1: header format change (more regular), export package for _ struct fields -// 0: Go1.7 encoding -const exportVersion = 4 - -// trackAllTypes enables cycle tracking for all types, not just named -// types. The existing compiler invariants assume that unnamed types -// that are not completely set up are not used, or else there are spurious -// errors. -// If disabled, only named types are tracked, possibly leading to slightly -// less efficient encoding in rare cases. It also prevents the export of -// some corner-case type declarations (but those are not handled correctly -// with the textual export format either). -// TODO(gri) enable and remove once issues caused by it are fixed -const trackAllTypes = false - -type exporter struct { - fset *token.FileSet - out bytes.Buffer - - // object -> index maps, indexed in order of serialization - strIndex map[string]int - pkgIndex map[*types.Package]int - typIndex map[types.Type]int - - // position encoding - posInfoFormat bool - prevFile string - prevLine int - - // debugging support - written int // bytes written - indent int // for trace -} - -// internalError represents an error generated inside this package. -type internalError string - -func (e internalError) Error() string { return "gcimporter: " + string(e) } - -func internalErrorf(format string, args ...interface{}) error { - return internalError(fmt.Sprintf(format, args...)) -} - -// BExportData returns binary export data for pkg.
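For context, the version header that BExportData emits (and that the binary importer later parses back) is a plain text line terminated by '\n', independent of the binary payload; everything after the first blank following the number is ignored. A toy round-trip of just that convention, mirroring the strings.SplitN parse that appears in the importer further down; the "debug" suffix is an arbitrary stand-in for the extra text the importer skips:

    package main

    import (
        "fmt"
        "strconv"
        "strings"
    )

    func main() {
        // Writer side: the line must start with "version %d"; any
        // debugging text after a blank is ignored by the importer.
        header := fmt.Sprintf("version %d debug\n", 4)

        // Reader side: split off at most three fields and parse the number.
        versionstr := strings.TrimSuffix(header, "\n")
        version := -1
        if s := strings.SplitN(versionstr, " ", 3); len(s) >= 2 && s[0] == "version" {
            if v, err := strconv.Atoi(s[1]); err == nil && v > 0 {
                version = v
            }
        }
        fmt.Println(version) // 4
    }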
-// If no file set is provided, position info will be missing. -func BExportData(fset *token.FileSet, pkg *types.Package) (b []byte, err error) { - if !debug { - defer func() { - if e := recover(); e != nil { - if ierr, ok := e.(internalError); ok { - err = ierr - return - } - // Not an internal error; panic again. - panic(e) - } - }() - } - - p := exporter{ - fset: fset, - strIndex: map[string]int{"": 0}, // empty string is mapped to 0 - pkgIndex: make(map[*types.Package]int), - typIndex: make(map[types.Type]int), - posInfoFormat: true, // TODO(gri) might become a flag, eventually - } - - // write version info - // The version string must start with "version %d" where %d is the version - // number. Additional debugging information may follow after a blank; that - // text is ignored by the importer. - p.rawStringln(fmt.Sprintf("version %d", exportVersion)) - var debug string - if debugFormat { - debug = "debug" - } - p.rawStringln(debug) // cannot use p.bool since it's affected by debugFormat; also want to see this clearly - p.bool(trackAllTypes) - p.bool(p.posInfoFormat) - - // --- generic export data --- - - // populate type map with predeclared "known" types - for index, typ := range predeclared() { - p.typIndex[typ] = index - } - if len(p.typIndex) != len(predeclared()) { - return nil, internalError("duplicate entries in type map?") - } - - // write package data - p.pkg(pkg, true) - if trace { - p.tracef("\n") - } - - // write objects - objcount := 0 - scope := pkg.Scope() - for _, name := range scope.Names() { - if !token.IsExported(name) { - continue - } - if trace { - p.tracef("\n") - } - p.obj(scope.Lookup(name)) - objcount++ - } - - // indicate end of list - if trace { - p.tracef("\n") - } - p.tag(endTag) - - // for self-verification only (redundant) - p.int(objcount) - - if trace { - p.tracef("\n") - } - - // --- end of export data --- - - return p.out.Bytes(), nil -} - -func (p *exporter) pkg(pkg *types.Package, emptypath bool) { - if pkg == nil { - panic(internalError("unexpected nil pkg")) - } - - // if we saw the package before, write its index (>= 0) - if i, ok := p.pkgIndex[pkg]; ok { - p.index('P', i) - return - } - - // otherwise, remember the package, write the package tag (< 0) and package data - if trace { - p.tracef("P%d = { ", len(p.pkgIndex)) - defer p.tracef("} ") - } - p.pkgIndex[pkg] = len(p.pkgIndex) - - p.tag(packageTag) - p.string(pkg.Name()) - if emptypath { - p.string("") - } else { - p.string(pkg.Path()) - } -} - -func (p *exporter) obj(obj types.Object) { - switch obj := obj.(type) { - case *types.Const: - p.tag(constTag) - p.pos(obj) - p.qualifiedName(obj) - p.typ(obj.Type()) - p.value(obj.Val()) - - case *types.TypeName: - if obj.IsAlias() { - p.tag(aliasTag) - p.pos(obj) - p.qualifiedName(obj) - } else { - p.tag(typeTag) - } - p.typ(obj.Type()) - - case *types.Var: - p.tag(varTag) - p.pos(obj) - p.qualifiedName(obj) - p.typ(obj.Type()) - - case *types.Func: - p.tag(funcTag) - p.pos(obj) - p.qualifiedName(obj) - sig := obj.Type().(*types.Signature) - p.paramList(sig.Params(), sig.Variadic()) - p.paramList(sig.Results(), false) - - default: - panic(internalErrorf("unexpected object %v (%T)", obj, obj)) - } -} - -func (p *exporter) pos(obj types.Object) { - if !p.posInfoFormat { - return - } - - file, line := p.fileLine(obj) - if file == p.prevFile { - // common case: write line delta - // delta == 0 means different file or no line change - delta := line - p.prevLine - p.int(delta) - if delta == 0 { - p.int(-1) // -1 means no file change - } - } else { - 
// different file - p.int(0) - // Encode filename as length of common prefix with previous - // filename, followed by (possibly empty) suffix. Filenames - // frequently share path prefixes, so this can save a lot - // of space and make export data size less dependent on file - // path length. The suffix is unlikely to be empty because - // file names tend to end in ".go". - n := commonPrefixLen(p.prevFile, file) - p.int(n) // n >= 0 - p.string(file[n:]) // write suffix only - p.prevFile = file - p.int(line) - } - p.prevLine = line -} - -func (p *exporter) fileLine(obj types.Object) (file string, line int) { - if p.fset != nil { - pos := p.fset.Position(obj.Pos()) - file = pos.Filename - line = pos.Line - } - return -} - -func commonPrefixLen(a, b string) int { - if len(a) > len(b) { - a, b = b, a - } - // len(a) <= len(b) - i := 0 - for i < len(a) && a[i] == b[i] { - i++ - } - return i -} - -func (p *exporter) qualifiedName(obj types.Object) { - p.string(obj.Name()) - p.pkg(obj.Pkg(), false) -} - -func (p *exporter) typ(t types.Type) { - if t == nil { - panic(internalError("nil type")) - } - - // Possible optimization: Anonymous pointer types *T where - // T is a named type are common. We could canonicalize all - // such types *T to a single type PT = *T. This would lead - // to at most one *T entry in typIndex, and all future *T's - // would be encoded as the respective index directly. Would - // save 1 byte (pointerTag) per *T and reduce the typIndex - // size (at the cost of a canonicalization map). We can do - // this later, without encoding format change. - - // if we saw the type before, write its index (>= 0) - if i, ok := p.typIndex[t]; ok { - p.index('T', i) - return - } - - // otherwise, remember the type, write the type tag (< 0) and type data - if trackAllTypes { - if trace { - p.tracef("T%d = {>\n", len(p.typIndex)) - defer p.tracef("<\n} ") - } - p.typIndex[t] = len(p.typIndex) - } - - switch t := t.(type) { - case *types.Named: - if !trackAllTypes { - // if we don't track all types, track named types now - p.typIndex[t] = len(p.typIndex) - } - - p.tag(namedTag) - p.pos(t.Obj()) - p.qualifiedName(t.Obj()) - p.typ(t.Underlying()) - if !types.IsInterface(t) { - p.assocMethods(t) - } - - case *types.Array: - p.tag(arrayTag) - p.int64(t.Len()) - p.typ(t.Elem()) - - case *types.Slice: - p.tag(sliceTag) - p.typ(t.Elem()) - - case *dddSlice: - p.tag(dddTag) - p.typ(t.elem) - - case *types.Struct: - p.tag(structTag) - p.fieldList(t) - - case *types.Pointer: - p.tag(pointerTag) - p.typ(t.Elem()) - - case *types.Signature: - p.tag(signatureTag) - p.paramList(t.Params(), t.Variadic()) - p.paramList(t.Results(), false) - - case *types.Interface: - p.tag(interfaceTag) - p.iface(t) - - case *types.Map: - p.tag(mapTag) - p.typ(t.Key()) - p.typ(t.Elem()) - - case *types.Chan: - p.tag(chanTag) - p.int(int(3 - t.Dir())) // hack - p.typ(t.Elem()) - - default: - panic(internalErrorf("unexpected type %T: %s", t, t)) - } -} - -func (p *exporter) assocMethods(named *types.Named) { - // Sort methods (for determinism). 
- var methods []*types.Func - for i := 0; i < named.NumMethods(); i++ { - methods = append(methods, named.Method(i)) - } - sort.Sort(methodsByName(methods)) - - p.int(len(methods)) - - if trace && methods != nil { - p.tracef("associated methods {>\n") - } - - for i, m := range methods { - if trace && i > 0 { - p.tracef("\n") - } - - p.pos(m) - name := m.Name() - p.string(name) - if !exported(name) { - p.pkg(m.Pkg(), false) - } - - sig := m.Type().(*types.Signature) - p.paramList(types.NewTuple(sig.Recv()), false) - p.paramList(sig.Params(), sig.Variadic()) - p.paramList(sig.Results(), false) - p.int(0) // dummy value for go:nointerface pragma - ignored by importer - } - - if trace && methods != nil { - p.tracef("<\n} ") - } -} - -type methodsByName []*types.Func - -func (x methodsByName) Len() int { return len(x) } -func (x methodsByName) Swap(i, j int) { x[i], x[j] = x[j], x[i] } -func (x methodsByName) Less(i, j int) bool { return x[i].Name() < x[j].Name() } - -func (p *exporter) fieldList(t *types.Struct) { - if trace && t.NumFields() > 0 { - p.tracef("fields {>\n") - defer p.tracef("<\n} ") - } - - p.int(t.NumFields()) - for i := 0; i < t.NumFields(); i++ { - if trace && i > 0 { - p.tracef("\n") - } - p.field(t.Field(i)) - p.string(t.Tag(i)) - } -} - -func (p *exporter) field(f *types.Var) { - if !f.IsField() { - panic(internalError("field expected")) - } - - p.pos(f) - p.fieldName(f) - p.typ(f.Type()) -} - -func (p *exporter) iface(t *types.Interface) { - // TODO(gri): enable importer to load embedded interfaces, - // then emit Embeddeds and ExplicitMethods separately here. - p.int(0) - - n := t.NumMethods() - if trace && n > 0 { - p.tracef("methods {>\n") - defer p.tracef("<\n} ") - } - p.int(n) - for i := 0; i < n; i++ { - if trace && i > 0 { - p.tracef("\n") - } - p.method(t.Method(i)) - } -} - -func (p *exporter) method(m *types.Func) { - sig := m.Type().(*types.Signature) - if sig.Recv() == nil { - panic(internalError("method expected")) - } - - p.pos(m) - p.string(m.Name()) - if m.Name() != "_" && !token.IsExported(m.Name()) { - p.pkg(m.Pkg(), false) - } - - // interface method; no need to encode receiver. - p.paramList(sig.Params(), sig.Variadic()) - p.paramList(sig.Results(), false) -} - -func (p *exporter) fieldName(f *types.Var) { - name := f.Name() - - if f.Anonymous() { - // anonymous field - we distinguish between 3 cases: - // 1) field name matches base type name and is exported - // 2) field name matches base type name and is not exported - // 3) field name doesn't match base type name (alias name) - bname := basetypeName(f.Type()) - if name == bname { - if token.IsExported(name) { - name = "" // 1) we don't need to know the field name or package - } else { - name = "?" // 2) use unexported name "?" 
to force package export - } - } else { - // 3) indicate alias and export name as is - // (this requires an extra "@" but this is a rare case) - p.string("@") - } - } - - p.string(name) - if name != "" && !token.IsExported(name) { - p.pkg(f.Pkg(), false) - } -} - -func basetypeName(typ types.Type) string { - switch typ := deref(typ).(type) { - case *types.Basic: - return typ.Name() - case *types.Named: - return typ.Obj().Name() - default: - return "" // unnamed type - } -} - -func (p *exporter) paramList(params *types.Tuple, variadic bool) { - // use negative length to indicate unnamed parameters - // (look at the first parameter only since either all - // names are present or all are absent) - n := params.Len() - if n > 0 && params.At(0).Name() == "" { - n = -n - } - p.int(n) - for i := 0; i < params.Len(); i++ { - q := params.At(i) - t := q.Type() - if variadic && i == params.Len()-1 { - t = &dddSlice{t.(*types.Slice).Elem()} - } - p.typ(t) - if n > 0 { - name := q.Name() - p.string(name) - if name != "_" { - p.pkg(q.Pkg(), false) - } - } - p.string("") // no compiler-specific info - } -} - -func (p *exporter) value(x constant.Value) { - if trace { - p.tracef("= ") - } - - switch x.Kind() { - case constant.Bool: - tag := falseTag - if constant.BoolVal(x) { - tag = trueTag - } - p.tag(tag) - - case constant.Int: - if v, exact := constant.Int64Val(x); exact { - // common case: x fits into an int64 - use compact encoding - p.tag(int64Tag) - p.int64(v) - return - } - // uncommon case: large x - use float encoding - // (powers of 2 will be encoded efficiently with exponent) - p.tag(floatTag) - p.float(constant.ToFloat(x)) - - case constant.Float: - p.tag(floatTag) - p.float(x) - - case constant.Complex: - p.tag(complexTag) - p.float(constant.Real(x)) - p.float(constant.Imag(x)) - - case constant.String: - p.tag(stringTag) - p.string(constant.StringVal(x)) - - case constant.Unknown: - // package contains type errors - p.tag(unknownTag) - - default: - panic(internalErrorf("unexpected value %v (%T)", x, x)) - } -} - -func (p *exporter) float(x constant.Value) { - if x.Kind() != constant.Float { - panic(internalErrorf("unexpected constant %v, want float", x)) - } - // extract sign (there is no -0) - sign := constant.Sign(x) - if sign == 0 { - // x == 0 - p.int(0) - return - } - // x != 0 - - var f big.Float - if v, exact := constant.Float64Val(x); exact { - // float64 - f.SetFloat64(v) - } else if num, denom := constant.Num(x), constant.Denom(x); num.Kind() == constant.Int { - // TODO(gri): add big.Rat accessor to constant.Value. - r := valueToRat(num) - f.SetRat(r.Quo(r, valueToRat(denom))) - } else { - // Value too large to represent as a fraction => inaccessible. - // TODO(gri): add big.Float accessor to constant.Value. - f.SetFloat64(math.MaxFloat64) // FIXME - } - - // extract exponent such that 0.5 <= m < 1.0 - var m big.Float - exp := f.MantExp(&m) - - // extract mantissa as *big.Int - // - set exponent large enough so mant satisfies mant.IsInt() - // - get *big.Int from mant - m.SetMantExp(&m, int(m.MinPrec())) - mant, acc := m.Int(nil) - if acc != big.Exact { - panic(internalError("internal error")) - } - - p.int(sign) - p.int(exp) - p.string(string(mant.Bytes())) -} - -func valueToRat(x constant.Value) *big.Rat { - // Convert little-endian to big-endian. - // I can't believe this is necessary. 
- bytes := constant.Bytes(x) - for i := 0; i < len(bytes)/2; i++ { - bytes[i], bytes[len(bytes)-1-i] = bytes[len(bytes)-1-i], bytes[i] - } - return new(big.Rat).SetInt(new(big.Int).SetBytes(bytes)) -} - -func (p *exporter) bool(b bool) bool { - if trace { - p.tracef("[") - defer p.tracef("= %v] ", b) - } - - x := 0 - if b { - x = 1 - } - p.int(x) - return b -} - -// ---------------------------------------------------------------------------- -// Low-level encoders - -func (p *exporter) index(marker byte, index int) { - if index < 0 { - panic(internalError("invalid index < 0")) - } - if debugFormat { - p.marker('t') - } - if trace { - p.tracef("%c%d ", marker, index) - } - p.rawInt64(int64(index)) -} - -func (p *exporter) tag(tag int) { - if tag >= 0 { - panic(internalError("invalid tag >= 0")) - } - if debugFormat { - p.marker('t') - } - if trace { - p.tracef("%s ", tagString[-tag]) - } - p.rawInt64(int64(tag)) -} - -func (p *exporter) int(x int) { - p.int64(int64(x)) -} - -func (p *exporter) int64(x int64) { - if debugFormat { - p.marker('i') - } - if trace { - p.tracef("%d ", x) - } - p.rawInt64(x) -} - -func (p *exporter) string(s string) { - if debugFormat { - p.marker('s') - } - if trace { - p.tracef("%q ", s) - } - // if we saw the string before, write its index (>= 0) - // (the empty string is mapped to 0) - if i, ok := p.strIndex[s]; ok { - p.rawInt64(int64(i)) - return - } - // otherwise, remember string and write its negative length and bytes - p.strIndex[s] = len(p.strIndex) - p.rawInt64(-int64(len(s))) - for i := 0; i < len(s); i++ { - p.rawByte(s[i]) - } -} - -// marker emits a marker byte and position information which makes -// it easy for a reader to detect if it is "out of sync". Used for -// debugFormat format only. -func (p *exporter) marker(m byte) { - p.rawByte(m) - // Enable this for help tracking down the location - // of an incorrect marker when running in debugFormat. - if false && trace { - p.tracef("#%d ", p.written) - } - p.rawInt64(int64(p.written)) -} - -// rawInt64 should only be used by low-level encoders. -func (p *exporter) rawInt64(x int64) { - var tmp [binary.MaxVarintLen64]byte - n := binary.PutVarint(tmp[:], x) - for i := 0; i < n; i++ { - p.rawByte(tmp[i]) - } -} - -// rawStringln should only be used to emit the initial version string. -func (p *exporter) rawStringln(s string) { - for i := 0; i < len(s); i++ { - p.rawByte(s[i]) - } - p.rawByte('\n') -} - -// rawByte is the bottleneck interface to write to p.out. -// rawByte escapes b as follows (any encoding does that -// hides '$'): -// -// '$' => '|' 'S' -// '|' => '|' '|' -// -// Necessary so other tools can find the end of the -// export data by searching for "$$". -// rawByte should only be used by low-level encoders. -func (p *exporter) rawByte(b byte) { - switch b { - case '$': - // write '$' as '|' 'S' - b = 'S' - fallthrough - case '|': - // write '|' as '|' '|' - p.out.WriteByte('|') - p.written++ - } - p.out.WriteByte(b) - p.written++ -} - -// tracef is like fmt.Printf but it rewrites the format string -// to take care of indentation. -func (p *exporter) tracef(format string, args ...interface{}) { - if strings.ContainsAny(format, "<>\n") { - var buf bytes.Buffer - for i := 0; i < len(format); i++ { - // no need to deal with runes - ch := format[i] - switch ch { - case '>': - p.indent++ - continue - case '<': - p.indent-- - continue - } - buf.WriteByte(ch) - if ch == '\n' { - for j := p.indent; j > 0; j-- { - buf.WriteString(". 
") - } - } - } - format = buf.String() - } - fmt.Printf(format, args...) -} - -// Debugging support. -// (tagString is only used when tracing is enabled) -var tagString = [...]string{ - // Packages - -packageTag: "package", - - // Types - -namedTag: "named type", - -arrayTag: "array", - -sliceTag: "slice", - -dddTag: "ddd", - -structTag: "struct", - -pointerTag: "pointer", - -signatureTag: "signature", - -interfaceTag: "interface", - -mapTag: "map", - -chanTag: "chan", - - // Values - -falseTag: "false", - -trueTag: "true", - -int64Tag: "int64", - -floatTag: "float", - -fractionTag: "fraction", - -complexTag: "complex", - -stringTag: "string", - -unknownTag: "unknown", - - // Type aliases - -aliasTag: "alias", -} diff --git a/vendor/golang.org/x/tools/internal/gcimporter/bimport.go b/vendor/golang.org/x/tools/internal/gcimporter/bimport.go index b85de0147..d98b0db2a 100644 --- a/vendor/golang.org/x/tools/internal/gcimporter/bimport.go +++ b/vendor/golang.org/x/tools/internal/gcimporter/bimport.go @@ -2,340 +2,24 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -// This file is a copy of $GOROOT/src/go/internal/gcimporter/bimport.go. +// This file contains the remaining vestiges of +// $GOROOT/src/go/internal/gcimporter/bimport.go. package gcimporter import ( - "encoding/binary" "fmt" - "go/constant" "go/token" "go/types" - "sort" - "strconv" - "strings" "sync" - "unicode" - "unicode/utf8" ) -type importer struct { - imports map[string]*types.Package - data []byte - importpath string - buf []byte // for reading strings - version int // export format version - - // object lists - strList []string // in order of appearance - pathList []string // in order of appearance - pkgList []*types.Package // in order of appearance - typList []types.Type // in order of appearance - interfaceList []*types.Interface // for delayed completion only - trackAllTypes bool - - // position encoding - posInfoFormat bool - prevFile string - prevLine int - fake fakeFileSet - - // debugging support - debugFormat bool - read int // bytes read -} - -// BImportData imports a package from the serialized package data -// and returns the number of bytes consumed and a reference to the package. -// If the export data version is not recognized or the format is otherwise -// compromised, an error is returned. -func BImportData(fset *token.FileSet, imports map[string]*types.Package, data []byte, path string) (_ int, pkg *types.Package, err error) { - // catch panics and return them as errors - const currentVersion = 6 - version := -1 // unknown version - defer func() { - if e := recover(); e != nil { - // Return a (possibly nil or incomplete) package unchanged (see #16088). - if version > currentVersion { - err = fmt.Errorf("cannot import %q (%v), export data is newer version - update tool", path, e) - } else { - err = fmt.Errorf("cannot import %q (%v), possibly version skew - reinstall package", path, e) - } - } - }() - - p := importer{ - imports: imports, - data: data, - importpath: path, - version: version, - strList: []string{""}, // empty string is mapped to 0 - pathList: []string{""}, // empty string is mapped to 0 - fake: fakeFileSet{ - fset: fset, - files: make(map[string]*fileInfo), - }, - } - defer p.fake.setLines() // set lines for files in fset - - // read version info - var versionstr string - if b := p.rawByte(); b == 'c' || b == 'd' { - // Go1.7 encoding; first byte encodes low-level - // encoding format (compact vs debug). 
- // For backward-compatibility only (avoid problems with - // old installed packages). Newly compiled packages use - // the extensible format string. - // TODO(gri) Remove this support eventually; after Go1.8. - if b == 'd' { - p.debugFormat = true - } - p.trackAllTypes = p.rawByte() == 'a' - p.posInfoFormat = p.int() != 0 - versionstr = p.string() - if versionstr == "v1" { - version = 0 - } - } else { - // Go1.8 extensible encoding - // read version string and extract version number (ignore anything after the version number) - versionstr = p.rawStringln(b) - if s := strings.SplitN(versionstr, " ", 3); len(s) >= 2 && s[0] == "version" { - if v, err := strconv.Atoi(s[1]); err == nil && v > 0 { - version = v - } - } - } - p.version = version - - // read version specific flags - extend as necessary - switch p.version { - // case currentVersion: - // ... - // fallthrough - case currentVersion, 5, 4, 3, 2, 1: - p.debugFormat = p.rawStringln(p.rawByte()) == "debug" - p.trackAllTypes = p.int() != 0 - p.posInfoFormat = p.int() != 0 - case 0: - // Go1.7 encoding format - nothing to do here - default: - errorf("unknown bexport format version %d (%q)", p.version, versionstr) - } - - // --- generic export data --- - - // populate typList with predeclared "known" types - p.typList = append(p.typList, predeclared()...) - - // read package data - pkg = p.pkg() - - // read objects of phase 1 only (see cmd/compile/internal/gc/bexport.go) - objcount := 0 - for { - tag := p.tagOrIndex() - if tag == endTag { - break - } - p.obj(tag) - objcount++ - } - - // self-verification - if count := p.int(); count != objcount { - errorf("got %d objects; want %d", objcount, count) - } - - // ignore compiler-specific import data - - // complete interfaces - // TODO(gri) re-investigate if we still need to do this in a delayed fashion - for _, typ := range p.interfaceList { - typ.Complete() - } - - // record all referenced packages as imports - list := append(([]*types.Package)(nil), p.pkgList[1:]...) 
- sort.Sort(byPath(list)) - pkg.SetImports(list) - - // package was imported completely and without errors - pkg.MarkComplete() - - return p.read, pkg, nil -} - func errorf(format string, args ...interface{}) { panic(fmt.Sprintf(format, args...)) } -func (p *importer) pkg() *types.Package { - // if the package was seen before, i is its index (>= 0) - i := p.tagOrIndex() - if i >= 0 { - return p.pkgList[i] - } - - // otherwise, i is the package tag (< 0) - if i != packageTag { - errorf("unexpected package tag %d version %d", i, p.version) - } - - // read package data - name := p.string() - var path string - if p.version >= 5 { - path = p.path() - } else { - path = p.string() - } - if p.version >= 6 { - p.int() // package height; unused by go/types - } - - // we should never see an empty package name - if name == "" { - errorf("empty package name in import") - } - - // an empty path denotes the package we are currently importing; - // it must be the first package we see - if (path == "") != (len(p.pkgList) == 0) { - errorf("package path %q for pkg index %d", path, len(p.pkgList)) - } - - // if the package was imported before, use that one; otherwise create a new one - if path == "" { - path = p.importpath - } - pkg := p.imports[path] - if pkg == nil { - pkg = types.NewPackage(path, name) - p.imports[path] = pkg - } else if pkg.Name() != name { - errorf("conflicting names %s and %s for package %q", pkg.Name(), name, path) - } - p.pkgList = append(p.pkgList, pkg) - - return pkg -} - -// objTag returns the tag value for each object kind. -func objTag(obj types.Object) int { - switch obj.(type) { - case *types.Const: - return constTag - case *types.TypeName: - return typeTag - case *types.Var: - return varTag - case *types.Func: - return funcTag - default: - errorf("unexpected object: %v (%T)", obj, obj) // panics - panic("unreachable") - } -} - -func sameObj(a, b types.Object) bool { - // Because unnamed types are not canonicalized, we cannot simply compare types for - // (pointer) identity. - // Ideally we'd check equality of constant values as well, but this is good enough. - return objTag(a) == objTag(b) && types.Identical(a.Type(), b.Type()) -} - -func (p *importer) declare(obj types.Object) { - pkg := obj.Pkg() - if alt := pkg.Scope().Insert(obj); alt != nil { - // This can only trigger if we import a (non-type) object a second time. - // Excluding type aliases, this cannot happen because a) we only import a package - // once; and b) we ignore compiler-specific export data which may contain - // functions whose inlined function bodies refer to other functions that - // were already imported. - // However, type aliases require reexporting the original type, so we need - // to allow it (see also the comment in cmd/compile/internal/gc/bimport.go, - // method importer.obj, switch case importing functions). - // TODO(gri) review/update this comment once the gc compiler handles type aliases.
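For context, the declare helper here leans on a documented property of go/types that is easy to miss: Scope.Insert is first-writer-wins and returns the already-present object on a name collision, which is what makes the inconsistent-import check possible. A self-contained sketch of that contract; the package path example.com/p is made up:

    package main

    import (
        "fmt"
        "go/token"
        "go/types"
    )

    func main() {
        pkg := types.NewPackage("example.com/p", "p")
        scope := pkg.Scope()

        first := types.NewVar(token.NoPos, pkg, "X", types.Typ[types.Int])
        second := types.NewVar(token.NoPos, pkg, "X", types.Typ[types.String])

        fmt.Println(scope.Insert(first)) // <nil>: inserted
        alt := scope.Insert(second)      // not inserted; existing object returned
        fmt.Println(alt == first)        // true: first declaration wins
        fmt.Println(types.Identical(alt.Type(), second.Type())) // false: a real conflict
    }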
- if !sameObj(obj, alt) { - errorf("inconsistent import:\n\t%v\npreviously imported as:\n\t%v\n", obj, alt) - } - } -} - -func (p *importer) obj(tag int) { - switch tag { - case constTag: - pos := p.pos() - pkg, name := p.qualifiedName() - typ := p.typ(nil, nil) - val := p.value() - p.declare(types.NewConst(pos, pkg, name, typ, val)) - - case aliasTag: - // TODO(gri) verify type alias hookup is correct - pos := p.pos() - pkg, name := p.qualifiedName() - typ := p.typ(nil, nil) - p.declare(types.NewTypeName(pos, pkg, name, typ)) - - case typeTag: - p.typ(nil, nil) - - case varTag: - pos := p.pos() - pkg, name := p.qualifiedName() - typ := p.typ(nil, nil) - p.declare(types.NewVar(pos, pkg, name, typ)) - - case funcTag: - pos := p.pos() - pkg, name := p.qualifiedName() - params, isddd := p.paramList() - result, _ := p.paramList() - sig := types.NewSignature(nil, params, result, isddd) - p.declare(types.NewFunc(pos, pkg, name, sig)) - - default: - errorf("unexpected object tag %d", tag) - } -} - const deltaNewFile = -64 // see cmd/compile/internal/gc/bexport.go -func (p *importer) pos() token.Pos { - if !p.posInfoFormat { - return token.NoPos - } - - file := p.prevFile - line := p.prevLine - delta := p.int() - line += delta - if p.version >= 5 { - if delta == deltaNewFile { - if n := p.int(); n >= 0 { - // file changed - file = p.path() - line = n - } - } - } else { - if delta == 0 { - if n := p.int(); n >= 0 { - // file changed - file = p.prevFile[:n] + p.string() - line = p.int() - } - } - } - p.prevFile = file - p.prevLine = line - - return p.fake.pos(file, line, 0) -} - // Synthesize a token.Pos type fakeFileSet struct { fset *token.FileSet @@ -389,205 +73,6 @@ var ( fakeLinesOnce sync.Once ) -func (p *importer) qualifiedName() (pkg *types.Package, name string) { - name = p.string() - pkg = p.pkg() - return -} - -func (p *importer) record(t types.Type) { - p.typList = append(p.typList, t) -} - -// A dddSlice is a types.Type representing ...T parameters. -// It only appears for parameter types and does not escape -// the importer. -type dddSlice struct { - elem types.Type -} - -func (t *dddSlice) Underlying() types.Type { return t } -func (t *dddSlice) String() string { return "..." + t.elem.String() } - -// parent is the package which declared the type; parent == nil means -// the package currently imported. The parent package is needed for -// exported struct fields and interface methods which don't contain -// explicit package information in the export data. -// -// A non-nil tname is used as the "owner" of the result type; i.e., -// the result type is the underlying type of tname. tname is used -// to give interface methods a named receiver type where possible. 
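For context, the namedTag case that follows uses a two-phase construction so that recursive types can be decoded: create the named type with a nil underlying, then patch the underlying in once it has been read (via SetUnderlying). A minimal sketch of the same pattern with a hand-built self-referential type; the package path and type names are invented:

    package main

    import (
        "fmt"
        "go/token"
        "go/types"
    )

    func main() {
        pkg := types.NewPackage("example.com/p", "p")

        // Phase 1: create the named type with a nil underlying, so the
        // type can be referred to before it is fully decoded.
        obj := types.NewTypeName(token.NoPos, pkg, "List", nil)
        list := types.NewNamed(obj, nil, nil)

        // Phase 2: build the underlying type, which refers back to List,
        // and patch it in.
        next := types.NewField(token.NoPos, pkg, "next", types.NewPointer(list), false)
        list.SetUnderlying(types.NewStruct([]*types.Var{next}, nil))

        fmt.Println(list.Underlying()) // struct{next *example.com/p.List}
    }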
-func (p *importer) typ(parent *types.Package, tname *types.Named) types.Type { - // if the type was seen before, i is its index (>= 0) - i := p.tagOrIndex() - if i >= 0 { - return p.typList[i] - } - - // otherwise, i is the type tag (< 0) - switch i { - case namedTag: - // read type object - pos := p.pos() - parent, name := p.qualifiedName() - scope := parent.Scope() - obj := scope.Lookup(name) - - // if the object doesn't exist yet, create and insert it - if obj == nil { - obj = types.NewTypeName(pos, parent, name, nil) - scope.Insert(obj) - } - - if _, ok := obj.(*types.TypeName); !ok { - errorf("pkg = %s, name = %s => %s", parent, name, obj) - } - - // associate new named type with obj if it doesn't exist yet - t0 := types.NewNamed(obj.(*types.TypeName), nil, nil) - - // but record the existing type, if any - tname := obj.Type().(*types.Named) // tname is either t0 or the existing type - p.record(tname) - - // read underlying type - t0.SetUnderlying(p.typ(parent, t0)) - - // interfaces don't have associated methods - if types.IsInterface(t0) { - return tname - } - - // read associated methods - for i := p.int(); i > 0; i-- { - // TODO(gri) replace this with something closer to fieldName - pos := p.pos() - name := p.string() - if !exported(name) { - p.pkg() - } - - recv, _ := p.paramList() // TODO(gri) do we need a full param list for the receiver? - params, isddd := p.paramList() - result, _ := p.paramList() - p.int() // go:nointerface pragma - discarded - - sig := types.NewSignature(recv.At(0), params, result, isddd) - t0.AddMethod(types.NewFunc(pos, parent, name, sig)) - } - - return tname - - case arrayTag: - t := new(types.Array) - if p.trackAllTypes { - p.record(t) - } - - n := p.int64() - *t = *types.NewArray(p.typ(parent, nil), n) - return t - - case sliceTag: - t := new(types.Slice) - if p.trackAllTypes { - p.record(t) - } - - *t = *types.NewSlice(p.typ(parent, nil)) - return t - - case dddTag: - t := new(dddSlice) - if p.trackAllTypes { - p.record(t) - } - - t.elem = p.typ(parent, nil) - return t - - case structTag: - t := new(types.Struct) - if p.trackAllTypes { - p.record(t) - } - - *t = *types.NewStruct(p.fieldList(parent)) - return t - - case pointerTag: - t := new(types.Pointer) - if p.trackAllTypes { - p.record(t) - } - - *t = *types.NewPointer(p.typ(parent, nil)) - return t - - case signatureTag: - t := new(types.Signature) - if p.trackAllTypes { - p.record(t) - } - - params, isddd := p.paramList() - result, _ := p.paramList() - *t = *types.NewSignature(nil, params, result, isddd) - return t - - case interfaceTag: - // Create a dummy entry in the type list. This is safe because we - // cannot expect the interface type to appear in a cycle, as any - // such cycle must contain a named type which would have been - // first defined earlier. - // TODO(gri) Is this still true now that we have type aliases? - // See issue #23225. 
- n := len(p.typList) - if p.trackAllTypes { - p.record(nil) - } - - var embeddeds []types.Type - for n := p.int(); n > 0; n-- { - p.pos() - embeddeds = append(embeddeds, p.typ(parent, nil)) - } - - t := newInterface(p.methodList(parent, tname), embeddeds) - p.interfaceList = append(p.interfaceList, t) - if p.trackAllTypes { - p.typList[n] = t - } - return t - - case mapTag: - t := new(types.Map) - if p.trackAllTypes { - p.record(t) - } - - key := p.typ(parent, nil) - val := p.typ(parent, nil) - *t = *types.NewMap(key, val) - return t - - case chanTag: - t := new(types.Chan) - if p.trackAllTypes { - p.record(t) - } - - dir := chanDir(p.int()) - val := p.typ(parent, nil) - *t = *types.NewChan(dir, val) - return t - - default: - errorf("unexpected type tag %d", i) // panics - panic("unreachable") - } -} - func chanDir(d int) types.ChanDir { // tag values must match the constants in cmd/compile/internal/gc/go.go switch d { @@ -603,394 +88,6 @@ func chanDir(d int) types.ChanDir { } } -func (p *importer) fieldList(parent *types.Package) (fields []*types.Var, tags []string) { - if n := p.int(); n > 0 { - fields = make([]*types.Var, n) - tags = make([]string, n) - for i := range fields { - fields[i], tags[i] = p.field(parent) - } - } - return -} - -func (p *importer) field(parent *types.Package) (*types.Var, string) { - pos := p.pos() - pkg, name, alias := p.fieldName(parent) - typ := p.typ(parent, nil) - tag := p.string() - - anonymous := false - if name == "" { - // anonymous field - typ must be T or *T and T must be a type name - switch typ := deref(typ).(type) { - case *types.Basic: // basic types are named types - pkg = nil // objects defined in Universe scope have no package - name = typ.Name() - case *types.Named: - name = typ.Obj().Name() - default: - errorf("named base type expected") - } - anonymous = true - } else if alias { - // anonymous field: we have an explicit name because it's an alias - anonymous = true - } - - return types.NewField(pos, pkg, name, typ, anonymous), tag -} - -func (p *importer) methodList(parent *types.Package, baseType *types.Named) (methods []*types.Func) { - if n := p.int(); n > 0 { - methods = make([]*types.Func, n) - for i := range methods { - methods[i] = p.method(parent, baseType) - } - } - return -} - -func (p *importer) method(parent *types.Package, baseType *types.Named) *types.Func { - pos := p.pos() - pkg, name, _ := p.fieldName(parent) - // If we don't have a baseType, use a nil receiver. - // A receiver using the actual interface type (which - // we don't know yet) will be filled in when we call - // types.Interface.Complete.
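For context, the nil-receiver trick described in the comment above pairs with the delayed completion seen earlier (the interfaceList loop). A minimal sketch of assembling an interface by hand; note that in the current go/types API, NewInterfaceType fills in nil method receivers itself, and Complete is the final step once all embedded interfaces are known:

    package main

    import (
        "fmt"
        "go/token"
        "go/types"
    )

    func main() {
        pkg := types.NewPackage("example.com/p", "p")

        // One method, Err() string, built with a nil receiver: go/types
        // fills it in with the interface type.
        results := types.NewTuple(types.NewVar(token.NoPos, pkg, "", types.Typ[types.String]))
        sig := types.NewSignature(nil, nil, results, false)
        errMethod := types.NewFunc(token.NoPos, pkg, "Err", sig)

        iface := types.NewInterfaceType([]*types.Func{errMethod}, nil)
        iface.Complete() // safe once all embedded interfaces are known

        fmt.Println(iface.NumMethods()) // 1
    }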
- var recv *types.Var - if baseType != nil { - recv = types.NewVar(token.NoPos, parent, "", baseType) - } - params, isddd := p.paramList() - result, _ := p.paramList() - sig := types.NewSignature(recv, params, result, isddd) - return types.NewFunc(pos, pkg, name, sig) -} - -func (p *importer) fieldName(parent *types.Package) (pkg *types.Package, name string, alias bool) { - name = p.string() - pkg = parent - if pkg == nil { - // use the imported package instead - pkg = p.pkgList[0] - } - if p.version == 0 && name == "_" { - // version 0 didn't export a package for _ fields - return - } - switch name { - case "": - // 1) field name matches base type name and is exported: nothing to do - case "?": - // 2) field name matches base type name and is not exported: need package - name = "" - pkg = p.pkg() - case "@": - // 3) field name doesn't match type name (alias) - name = p.string() - alias = true - fallthrough - default: - if !exported(name) { - pkg = p.pkg() - } - } - return -} - -func (p *importer) paramList() (*types.Tuple, bool) { - n := p.int() - if n == 0 { - return nil, false - } - // negative length indicates unnamed parameters - named := true - if n < 0 { - n = -n - named = false - } - // n > 0 - params := make([]*types.Var, n) - isddd := false - for i := range params { - params[i], isddd = p.param(named) - } - return types.NewTuple(params...), isddd -} - -func (p *importer) param(named bool) (*types.Var, bool) { - t := p.typ(nil, nil) - td, isddd := t.(*dddSlice) - if isddd { - t = types.NewSlice(td.elem) - } - - var pkg *types.Package - var name string - if named { - name = p.string() - if name == "" { - errorf("expected named parameter") - } - if name != "_" { - pkg = p.pkg() - } - if i := strings.Index(name, "·"); i > 0 { - name = name[:i] // cut off gc-specific parameter numbering - } - } - - // read and discard compiler-specific info - p.string() - - return types.NewVar(token.NoPos, pkg, name, t), isddd -} - -func exported(name string) bool { - ch, _ := utf8.DecodeRuneInString(name) - return unicode.IsUpper(ch) -} - -func (p *importer) value() constant.Value { - switch tag := p.tagOrIndex(); tag { - case falseTag: - return constant.MakeBool(false) - case trueTag: - return constant.MakeBool(true) - case int64Tag: - return constant.MakeInt64(p.int64()) - case floatTag: - return p.float() - case complexTag: - re := p.float() - im := p.float() - return constant.BinaryOp(re, token.ADD, constant.MakeImag(im)) - case stringTag: - return constant.MakeString(p.string()) - case unknownTag: - return constant.MakeUnknown() - default: - errorf("unexpected value tag %d", tag) // panics - panic("unreachable") - } -} - -func (p *importer) float() constant.Value { - sign := p.int() - if sign == 0 { - return constant.MakeInt64(0) - } - - exp := p.int() - mant := []byte(p.string()) // big endian - - // remove leading 0's if any - for len(mant) > 0 && mant[0] == 0 { - mant = mant[1:] - } - - // convert to little endian - // TODO(gri) go/constant should have a more direct conversion function - // (e.g., once it supports a big.Float based implementation) - for i, j := 0, len(mant)-1; i < j; i, j = i+1, j-1 { - mant[i], mant[j] = mant[j], mant[i] - } - - // adjust exponent (constant.MakeFromBytes creates an integer value, - // but mant represents the mantissa bits such that 0.5 <= mant < 1.0) - exp -= len(mant) << 3 - if len(mant) > 0 { - for msd := mant[len(mant)-1]; msd&0x80 == 0; msd <<= 1 { - exp++ - } - } - - x := constant.MakeFromBytes(mant) - switch { - case exp < 0: - d := 
constant.Shift(constant.MakeInt64(1), token.SHL, uint(-exp)) - x = constant.BinaryOp(x, token.QUO, d) - case exp > 0: - x = constant.Shift(x, token.SHL, uint(exp)) - } - - if sign < 0 { - x = constant.UnaryOp(token.SUB, x, 0) - } - return x -} - -// ---------------------------------------------------------------------------- -// Low-level decoders - -func (p *importer) tagOrIndex() int { - if p.debugFormat { - p.marker('t') - } - - return int(p.rawInt64()) -} - -func (p *importer) int() int { - x := p.int64() - if int64(int(x)) != x { - errorf("exported integer too large") - } - return int(x) -} - -func (p *importer) int64() int64 { - if p.debugFormat { - p.marker('i') - } - - return p.rawInt64() -} - -func (p *importer) path() string { - if p.debugFormat { - p.marker('p') - } - // if the path was seen before, i is its index (>= 0) - // (the empty string is at index 0) - i := p.rawInt64() - if i >= 0 { - return p.pathList[i] - } - // otherwise, i is the negative path length (< 0) - a := make([]string, -i) - for n := range a { - a[n] = p.string() - } - s := strings.Join(a, "/") - p.pathList = append(p.pathList, s) - return s -} - -func (p *importer) string() string { - if p.debugFormat { - p.marker('s') - } - // if the string was seen before, i is its index (>= 0) - // (the empty string is at index 0) - i := p.rawInt64() - if i >= 0 { - return p.strList[i] - } - // otherwise, i is the negative string length (< 0) - if n := int(-i); n <= cap(p.buf) { - p.buf = p.buf[:n] - } else { - p.buf = make([]byte, n) - } - for i := range p.buf { - p.buf[i] = p.rawByte() - } - s := string(p.buf) - p.strList = append(p.strList, s) - return s -} - -func (p *importer) marker(want byte) { - if got := p.rawByte(); got != want { - errorf("incorrect marker: got %c; want %c (pos = %d)", got, want, p.read) - } - - pos := p.read - if n := int(p.rawInt64()); n != pos { - errorf("incorrect position: got %d; want %d", n, pos) - } -} - -// rawInt64 should only be used by low-level decoders. -func (p *importer) rawInt64() int64 { - i, err := binary.ReadVarint(p) - if err != nil { - errorf("read error: %v", err) - } - return i -} - -// rawStringln should only be used to read the initial version string. -func (p *importer) rawStringln(b byte) string { - p.buf = p.buf[:0] - for b != '\n' { - p.buf = append(p.buf, b) - b = p.rawByte() - } - return string(p.buf) -} - -// needed for binary.ReadVarint in rawInt64 -func (p *importer) ReadByte() (byte, error) { - return p.rawByte(), nil -} - -// byte is the bottleneck interface for reading p.data. -// It unescapes '|' 'S' to '$' and '|' '|' to '|'. -// rawByte should only be used by low-level decoders. -func (p *importer) rawByte() byte { - b := p.data[0] - r := 1 - if b == '|' { - b = p.data[1] - r = 2 - switch b { - case 'S': - b = '$' - case '|': - // nothing to do - default: - errorf("unexpected escape sequence in export data") - } - } - p.data = p.data[r:] - p.read += r - return b - -} - -// ---------------------------------------------------------------------------- -// Export format - -// Tags. Must be < 0. 
-const ( - // Objects - packageTag = -(iota + 1) - constTag - typeTag - varTag - funcTag - endTag - - // Types - namedTag - arrayTag - sliceTag - dddTag - structTag - pointerTag - signatureTag - interfaceTag - mapTag - chanTag - - // Values - falseTag - trueTag - int64Tag - floatTag - fractionTag // not used by gc - complexTag - stringTag - nilTag // only used by gc (appears in exported inlined function bodies) - unknownTag // not used by gc (only appears in packages with errors) - - // Type aliases - aliasTag -) - var predeclOnce sync.Once var predecl []types.Type // initialized lazily diff --git a/vendor/golang.org/x/tools/internal/gcimporter/gcimporter.go b/vendor/golang.org/x/tools/internal/gcimporter/gcimporter.go index a973dece9..b1223713b 100644 --- a/vendor/golang.org/x/tools/internal/gcimporter/gcimporter.go +++ b/vendor/golang.org/x/tools/internal/gcimporter/gcimporter.go @@ -230,20 +230,17 @@ func Import(packages map[string]*types.Package, path, srcDir string, lookup func // Or, define a new standard go/types/gcexportdata package. fset := token.NewFileSet() - // The indexed export format starts with an 'i'; the older - // binary export format starts with a 'c', 'd', or 'v' - // (from "version"). Select appropriate importer. + // Select appropriate importer. if len(data) > 0 { switch data[0] { - case 'i': - _, pkg, err := IImportData(fset, packages, data[1:], id) - return pkg, err + case 'v', 'c', 'd': // binary, till go1.10 + return nil, fmt.Errorf("binary (%c) import format is no longer supported", data[0]) - case 'v', 'c', 'd': - _, pkg, err := BImportData(fset, packages, data, id) + case 'i': // indexed, till go1.19 + _, pkg, err := IImportData(fset, packages, data[1:], id) return pkg, err - case 'u': + case 'u': // unified, from go1.20 _, pkg, err := UImportData(fset, packages, data[1:size], id) return pkg, err diff --git a/vendor/golang.org/x/tools/internal/gcimporter/iexport.go b/vendor/golang.org/x/tools/internal/gcimporter/iexport.go index ba53cdcdd..9930d8c36 100644 --- a/vendor/golang.org/x/tools/internal/gcimporter/iexport.go +++ b/vendor/golang.org/x/tools/internal/gcimporter/iexport.go @@ -44,12 +44,12 @@ func IExportShallow(fset *token.FileSet, pkg *types.Package) ([]byte, error) { return out.Bytes(), err } -// IImportShallow decodes "shallow" types.Package data encoded by IExportShallow -// in the same executable. This function cannot import data from +// IImportShallow decodes "shallow" types.Package data encoded by +// IExportShallow in the same executable. This function cannot import data from // cmd/compile or gcexportdata.Write. -func IImportShallow(fset *token.FileSet, imports map[string]*types.Package, data []byte, path string, insert InsertType) (*types.Package, error) { +func IImportShallow(fset *token.FileSet, getPackage GetPackageFunc, data []byte, path string, insert InsertType) (*types.Package, error) { const bundle = false - pkgs, err := iimportCommon(fset, imports, data, bundle, path, insert) + pkgs, err := iimportCommon(fset, getPackage, data, bundle, path, insert) if err != nil { return nil, err } @@ -969,6 +969,16 @@ func constantToFloat(x constant.Value) *big.Float { return &f } +func valueToRat(x constant.Value) *big.Rat { + // Convert little-endian to big-endian. + // I can't believe this is necessary. 
+ bytes := constant.Bytes(x) + for i := 0; i < len(bytes)/2; i++ { + bytes[i], bytes[len(bytes)-1-i] = bytes[len(bytes)-1-i], bytes[i] + } + return new(big.Rat).SetInt(new(big.Int).SetBytes(bytes)) +} + // mpint exports a multi-precision integer. // // For unsigned types, small values are written out as a single @@ -1178,3 +1188,12 @@ func (q *objQueue) popHead() types.Object { q.head++ return obj } + +// internalError represents an error generated inside this package. +type internalError string + +func (e internalError) Error() string { return "gcimporter: " + string(e) } + +func internalErrorf(format string, args ...interface{}) error { + return internalError(fmt.Sprintf(format, args...)) +} diff --git a/vendor/golang.org/x/tools/internal/gcimporter/iimport.go b/vendor/golang.org/x/tools/internal/gcimporter/iimport.go index 448f903e8..94a5eba33 100644 --- a/vendor/golang.org/x/tools/internal/gcimporter/iimport.go +++ b/vendor/golang.org/x/tools/internal/gcimporter/iimport.go @@ -85,7 +85,7 @@ const ( // If the export data version is not recognized or the format is otherwise // compromised, an error is returned. func IImportData(fset *token.FileSet, imports map[string]*types.Package, data []byte, path string) (int, *types.Package, error) { - pkgs, err := iimportCommon(fset, imports, data, false, path, nil) + pkgs, err := iimportCommon(fset, GetPackageFromMap(imports), data, false, path, nil) if err != nil { return 0, nil, err } @@ -94,10 +94,33 @@ func IImportData(fset *token.FileSet, imports map[string]*types.Package, data [] // IImportBundle imports a set of packages from the serialized package bundle. func IImportBundle(fset *token.FileSet, imports map[string]*types.Package, data []byte) ([]*types.Package, error) { - return iimportCommon(fset, imports, data, true, "", nil) + return iimportCommon(fset, GetPackageFromMap(imports), data, true, "", nil) } -func iimportCommon(fset *token.FileSet, imports map[string]*types.Package, data []byte, bundle bool, path string, insert InsertType) (pkgs []*types.Package, err error) { +// A GetPackageFunc is a function that gets the package with the given path +// from the importer state, creating it (with the specified name) if necessary. +// It is an abstraction of the map historically used to memoize package creation. +// +// Two calls with the same path must return the same package. +// +// If the given getPackage func returns nil, the import will fail. +type GetPackageFunc = func(path, name string) *types.Package + +// GetPackageFromMap returns a GetPackageFunc that retrieves packages from the +// given map of package path -> package. +// +// The resulting func may mutate m: if a requested package is not found, a new +// package will be inserted into m. 
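As the GetPackageFunc comment above notes, a func that returns nil makes the import fail rather than silently materializing a package. That escape hatch supports policies beyond plain memoization; the allowOnly helper below is hypothetical, shown only to illustrate the contract (same path in, same *types.Package out; nil means refuse):

    package main

    import (
        "fmt"
        "go/types"
    )

    // allowOnly returns a getPackage-style func that creates packages for
    // an allow-listed set of import paths and returns nil for anything
    // else, which would make the import fail instead of creating the package.
    func allowOnly(paths ...string) func(path, name string) *types.Package {
        seen := make(map[string]*types.Package)
        allowed := make(map[string]bool)
        for _, p := range paths {
            allowed[p] = true
        }
        return func(path, name string) *types.Package {
            if !allowed[path] {
                return nil
            }
            if seen[path] == nil {
                seen[path] = types.NewPackage(path, name)
            }
            return seen[path] // same path always yields the same package
        }
    }

    func main() {
        get := allowOnly("example.com/p")
        fmt.Println(get("example.com/p", "p") != nil)    // true
        fmt.Println(get("example.com/evil", "e") == nil) // true: import would fail
    }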
+func GetPackageFromMap(m map[string]*types.Package) GetPackageFunc {
+	return func(path, name string) *types.Package {
+		if _, ok := m[path]; !ok {
+			m[path] = types.NewPackage(path, name)
+		}
+		return m[path]
+	}
+}
+
+func iimportCommon(fset *token.FileSet, getPackage GetPackageFunc, data []byte, bundle bool, path string, insert InsertType) (pkgs []*types.Package, err error) {
 	const currentVersion = iexportVersionCurrent
 	version := int64(-1)
 	if !debug {
@@ -108,7 +131,7 @@ func iimportCommon(fset *token.FileSet, imports map[string]*types.Package, data
 			} else if version > currentVersion {
 				err = fmt.Errorf("cannot import %q (%v), export data is newer version - update tool", path, e)
 			} else {
-				err = fmt.Errorf("cannot import %q (%v), possibly version skew - reinstall package", path, e)
+				err = fmt.Errorf("internal error while importing %q (%v); please report an issue", path, e)
 			}
 		}
 	}()
@@ -117,11 +140,8 @@ func iimportCommon(fset *token.FileSet, imports map[string]*types.Package, data
 	r := &intReader{bytes.NewReader(data), path}
 
 	if bundle {
-		bundleVersion := r.uint64()
-		switch bundleVersion {
-		case bundleVersion:
-		default:
-			errorf("unknown bundle format version %d", bundleVersion)
+		if v := r.uint64(); v != bundleVersion {
+			errorf("unknown bundle format version %d", v)
 		}
 	}
 
@@ -195,10 +215,9 @@ func iimportCommon(fset *token.FileSet, imports map[string]*types.Package, data
 		if pkgPath == "" {
 			pkgPath = path
 		}
-		pkg := imports[pkgPath]
+		pkg := getPackage(pkgPath, pkgName)
 		if pkg == nil {
-			pkg = types.NewPackage(pkgPath, pkgName)
-			imports[pkgPath] = pkg
+			errorf("internal error: getPackage returned nil package for %s", pkgPath)
 		} else if pkg.Name() != pkgName {
 			errorf("conflicting names %s and %s for package %q", pkg.Name(), pkgName, path)
 		}
diff --git a/vendor/golang.org/x/tools/internal/gcimporter/ureader_yes.go b/vendor/golang.org/x/tools/internal/gcimporter/ureader_yes.go
index 34fc783f8..b977435f6 100644
--- a/vendor/golang.org/x/tools/internal/gcimporter/ureader_yes.go
+++ b/vendor/golang.org/x/tools/internal/gcimporter/ureader_yes.go
@@ -10,6 +10,7 @@ package gcimporter
 
 import (
+	"fmt"
 	"go/token"
 	"go/types"
 	"sort"
@@ -63,6 +64,14 @@ type typeInfo struct {
 }
 
 func UImportData(fset *token.FileSet, imports map[string]*types.Package, data []byte, path string) (_ int, pkg *types.Package, err error) {
+	if !debug {
+		defer func() {
+			if x := recover(); x != nil {
+				err = fmt.Errorf("internal error in importing %q (%v); please report an issue", path, x)
+			}
+		}()
+	}
+
 	s := string(data)
 	s = s[:strings.LastIndex(s, "\n$$\n")]
 	input := pkgbits.NewPkgDecoder(path, s)
diff --git a/vendor/golang.org/x/tools/internal/gocommand/invoke.go b/vendor/golang.org/x/tools/internal/gocommand/invoke.go
index d50551693..8d9fc98d8 100644
--- a/vendor/golang.org/x/tools/internal/gocommand/invoke.go
+++ b/vendor/golang.org/x/tools/internal/gocommand/invoke.go
@@ -8,10 +8,12 @@ package gocommand
 import (
 	"bytes"
 	"context"
+	"errors"
 	"fmt"
 	"io"
 	"log"
 	"os"
+	"reflect"
 	"regexp"
 	"runtime"
 	"strconv"
@@ -22,6 +24,9 @@ import (
 	exec "golang.org/x/sys/execabs"
 
 	"golang.org/x/tools/internal/event"
+	"golang.org/x/tools/internal/event/keys"
+	"golang.org/x/tools/internal/event/label"
+	"golang.org/x/tools/internal/event/tag"
 )
 
 // A Runner will run go command invocations and serialize
@@ -51,9 +56,19 @@ func (runner *Runner) initialize() {
 // 1.14: go: updating go.mod: existing contents have changed since last read
 var modConcurrencyError = regexp.MustCompile(`go:.*go.mod.*contents have changed`)
 
+// verb is an event label for the go command verb.
+var verb = keys.NewString("verb", "go command verb")
+
+func invLabels(inv Invocation) []label.Label {
+	return []label.Label{verb.Of(inv.Verb), tag.Directory.Of(inv.WorkingDir)}
+}
+
 // Run is a convenience wrapper around RunRaw.
 // It returns only stdout and a "friendly" error.
 func (runner *Runner) Run(ctx context.Context, inv Invocation) (*bytes.Buffer, error) {
+	ctx, done := event.Start(ctx, "gocommand.Runner.Run", invLabels(inv)...)
+	defer done()
+
 	stdout, _, friendly, _ := runner.RunRaw(ctx, inv)
 	return stdout, friendly
 }
@@ -61,6 +76,9 @@ func (runner *Runner) Run(ctx context.Context, inv Invocation) (*bytes.Buffer, e
 // RunPiped runs the invocation serially, always waiting for any concurrent
 // invocations to complete first.
 func (runner *Runner) RunPiped(ctx context.Context, inv Invocation, stdout, stderr io.Writer) error {
+	ctx, done := event.Start(ctx, "gocommand.Runner.RunPiped", invLabels(inv)...)
+	defer done()
+
 	_, err := runner.runPiped(ctx, inv, stdout, stderr)
 	return err
 }
@@ -68,6 +86,8 @@ func (runner *Runner) RunPiped(ctx context.Context, inv Invocation, stdout, stde
 // RunRaw runs the invocation, serializing requests only if they fight over
 // go.mod changes.
 func (runner *Runner) RunRaw(ctx context.Context, inv Invocation) (*bytes.Buffer, *bytes.Buffer, error, error) {
+	ctx, done := event.Start(ctx, "gocommand.Runner.RunRaw", invLabels(inv)...)
+	defer done()
 	// Make sure the runner is always initialized.
 	runner.initialize()
 
@@ -215,6 +235,18 @@ func (i *Invocation) run(ctx context.Context, stdout, stderr io.Writer) error {
 	cmd := exec.Command("go", goArgs...)
 	cmd.Stdout = stdout
 	cmd.Stderr = stderr
+
+	// cmd.WaitDelay was added only in go1.20 (see #50436).
+	if waitDelay := reflect.ValueOf(cmd).Elem().FieldByName("WaitDelay"); waitDelay.IsValid() {
+		// https://go.dev/issue/59541: don't wait forever copying stderr
+		// after the command has exited.
+		// After CL 484741 we copy stdout manually, so we'll stop reading that as
+		// soon as ctx is done. However, we also don't want to wait around forever
+		// for stderr. Give a much-longer-than-reasonable delay and then assume that
+		// something has wedged in the kernel or runtime.
+		waitDelay.Set(reflect.ValueOf(30 * time.Second))
+	}
+
 	// On darwin the cwd gets resolved to the real path, which breaks anything that
 	// expects the working directory to keep the original path, including the
 	// go command when dealing with modules.
@@ -229,6 +261,7 @@ func (i *Invocation) run(ctx context.Context, stdout, stderr io.Writer) error {
 		cmd.Env = append(cmd.Env, "PWD="+i.WorkingDir)
 		cmd.Dir = i.WorkingDir
 	}
+
 	defer func(start time.Time) { log("%s for %v", time.Since(start), cmdDebugStr(cmd)) }(time.Now())
 
 	return runCmdContext(ctx, cmd)
@@ -242,10 +275,85 @@ var DebugHangingGoCommands = false
 
 // runCmdContext is like exec.CommandContext except it sends os.Interrupt
 // before os.Kill.
-func runCmdContext(ctx context.Context, cmd *exec.Cmd) error {
-	if err := cmd.Start(); err != nil {
+func runCmdContext(ctx context.Context, cmd *exec.Cmd) (err error) {
+	// If cmd.Stdout is not an *os.File, the exec package will create a pipe and
+	// copy it to the Writer in a goroutine until the process has finished and
+	// either the pipe reaches EOF or command's WaitDelay expires.
+	//
+	// However, the output from 'go list' can be quite large, and we don't want to
+	// keep reading (and allocating buffers) if we've already decided we don't
+	// care about the output. We don't want to wait for the process to finish, and
+	// we don't want to wait for the WaitDelay to expire either.
+	//
+	// Instead, if cmd.Stdout requires a copying goroutine we explicitly replace
+	// it with a pipe (which is an *os.File), which we can close in order to stop
+	// copying output as soon as we realize we don't care about it.
+	var stdoutW *os.File
+	if cmd.Stdout != nil {
+		if _, ok := cmd.Stdout.(*os.File); !ok {
+			var stdoutR *os.File
+			stdoutR, stdoutW, err = os.Pipe()
+			if err != nil {
+				return err
+			}
+			prevStdout := cmd.Stdout
+			cmd.Stdout = stdoutW
+
+			stdoutErr := make(chan error, 1)
+			go func() {
+				_, err := io.Copy(prevStdout, stdoutR)
+				if err != nil {
+					err = fmt.Errorf("copying stdout: %w", err)
+				}
+				stdoutErr <- err
+			}()
+			defer func() {
+				// We started a goroutine to copy a stdout pipe.
+				// Wait for it to finish, or terminate it if need be.
+				var err2 error
+				select {
+				case err2 = <-stdoutErr:
+					stdoutR.Close()
+				case <-ctx.Done():
+					stdoutR.Close()
+					// Per https://pkg.go.dev/os#File.Close, the call to stdoutR.Close
+					// should cause the Read call in io.Copy to unblock and return
+					// immediately, but we still need to receive from stdoutErr to confirm
+					// that that has happened.
+					<-stdoutErr
+					err2 = ctx.Err()
+				}
+				if err == nil {
+					err = err2
+				}
+			}()
+
+			// Per https://pkg.go.dev/os/exec#Cmd, “If Stdout and Stderr are the
+			// same writer, and have a type that can be compared with ==, at most
+			// one goroutine at a time will call Write.”
+			//
+			// Since we're starting a goroutine that writes to cmd.Stdout, we must
+			// also update cmd.Stderr so that that still holds.
+			func() {
+				defer func() { recover() }()
+				if cmd.Stderr == prevStdout {
+					cmd.Stderr = cmd.Stdout
+				}
+			}()
+		}
+	}
+
+	err = cmd.Start()
+	if stdoutW != nil {
+		// The child process has inherited the pipe file,
+		// so close the copy held in this process.
+		stdoutW.Close()
+		stdoutW = nil
+	}
+	if err != nil {
 		return err
 	}
+
 	resChan := make(chan error, 1)
 	go func() {
 		resChan <- cmd.Wait()
@@ -253,11 +361,14 @@ func runCmdContext(ctx context.Context, cmd *exec.Cmd) error {
 
 	// If we're interested in debugging hanging Go commands, stop waiting after a
 	// minute and panic with interesting information.
-	if DebugHangingGoCommands {
+	debug := DebugHangingGoCommands
+	if debug {
+		timer := time.NewTimer(1 * time.Minute)
+		defer timer.Stop()
 		select {
 		case err := <-resChan:
 			return err
-		case <-time.After(1 * time.Minute):
+		case <-timer.C:
 			HandleHangingGoCommand(cmd.Process)
 		case <-ctx.Done():
 		}
@@ -270,30 +381,25 @@ func runCmdContext(ctx context.Context, cmd *exec.Cmd) error {
 	}
 
 	// Cancelled. Interrupt and see if it ends voluntarily.
-	cmd.Process.Signal(os.Interrupt)
-	select {
-	case err := <-resChan:
-		return err
-	case <-time.After(time.Second):
+	if err := cmd.Process.Signal(os.Interrupt); err == nil {
+		// (We used to wait only 1s but this proved
+		// fragile on loaded builder machines.)
+		timer := time.NewTimer(5 * time.Second)
+		defer timer.Stop()
+		select {
+		case err := <-resChan:
+			return err
+		case <-timer.C:
+		}
 	}
 
 	// Didn't shut down in response to interrupt. Kill it hard.
 	// TODO(rfindley): per advice from bcmills@, it may be better to send SIGQUIT
 	// on certain platforms, such as unix.
-	if err := cmd.Process.Kill(); err != nil && DebugHangingGoCommands {
-		// Don't panic here as this reliably fails on windows with EINVAL.
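+	// Note: Kill can race with normal process exit, in which case it returns
+	// os.ErrProcessDone; that case is expected here and not worth logging.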
+	if err := cmd.Process.Kill(); err != nil && !errors.Is(err, os.ErrProcessDone) && debug {
 		log.Printf("error killing the Go command: %v", err)
 	}
 
-	// See above: don't wait indefinitely if we're debugging hanging Go commands.
-	if DebugHangingGoCommands {
-		select {
-		case err := <-resChan:
-			return err
-		case <-time.After(10 * time.Second): // a shorter wait as resChan should return quickly following Kill
-			HandleHangingGoCommand(cmd.Process)
-		}
-	}
 	return <-resChan
 }
diff --git a/vendor/golang.org/x/tools/internal/gocommand/version.go b/vendor/golang.org/x/tools/internal/gocommand/version.go
index 307a76d47..446c5846a 100644
--- a/vendor/golang.org/x/tools/internal/gocommand/version.go
+++ b/vendor/golang.org/x/tools/internal/gocommand/version.go
@@ -23,21 +23,11 @@ import (
 func GoVersion(ctx context.Context, inv Invocation, r *Runner) (int, error) {
 	inv.Verb = "list"
 	inv.Args = []string{"-e", "-f", `{{context.ReleaseTags}}`, `--`, `unsafe`}
-	inv.Env = append(append([]string{}, inv.Env...), "GO111MODULE=off")
-	// Unset any unneeded flags, and remove them from BuildFlags, if they're
-	// present.
-	inv.ModFile = ""
+	inv.BuildFlags = nil // This is not a build command.
 	inv.ModFlag = ""
-	var buildFlags []string
-	for _, flag := range inv.BuildFlags {
-		// Flags can be prefixed by one or two dashes.
-		f := strings.TrimPrefix(strings.TrimPrefix(flag, "-"), "-")
-		if strings.HasPrefix(f, "mod=") || strings.HasPrefix(f, "modfile=") {
-			continue
-		}
-		buildFlags = append(buildFlags, flag)
-	}
-	inv.BuildFlags = buildFlags
+	inv.ModFile = ""
+	inv.Env = append(inv.Env[:len(inv.Env):len(inv.Env)], "GO111MODULE=off")
+
 	stdoutBytes, err := r.Run(ctx, inv)
 	if err != nil {
 		return 0, err
diff --git a/vendor/golang.org/x/tools/internal/tokeninternal/tokeninternal.go b/vendor/golang.org/x/tools/internal/tokeninternal/tokeninternal.go
index a3fb2d4f2..7e638ec24 100644
--- a/vendor/golang.org/x/tools/internal/tokeninternal/tokeninternal.go
+++ b/vendor/golang.org/x/tools/internal/tokeninternal/tokeninternal.go
@@ -7,7 +7,9 @@ package tokeninternal
 
 import (
+	"fmt"
 	"go/token"
+	"sort"
 	"sync"
 	"unsafe"
 )
@@ -57,3 +59,93 @@ func GetLines(file *token.File) []int {
 		panic("unexpected token.File size")
 	}
 }
+
+// AddExistingFiles adds the specified files to the FileSet if they
+// are not already present. It panics if any pair of files in the
+// resulting FileSet would overlap.
+func AddExistingFiles(fset *token.FileSet, files []*token.File) {
+	// Punch through the FileSet encapsulation.
+	type tokenFileSet struct {
+		// This type remained essentially consistent from go1.16 to go1.21.
+		mutex sync.RWMutex
+		base  int
+		files []*token.File
+		_     *token.File // changed to atomic.Pointer[token.File] in go1.19
+	}
+
+	// If the size of token.FileSet changes, this will fail to compile.
+	const delta = int64(unsafe.Sizeof(tokenFileSet{})) - int64(unsafe.Sizeof(token.FileSet{}))
+	var _ [-delta * delta]int
+
+	type uP = unsafe.Pointer
+	var ptr *tokenFileSet
+	*(*uP)(uP(&ptr)) = uP(fset)
+	ptr.mutex.Lock()
+	defer ptr.mutex.Unlock()
+
+	// Merge and sort.
+	newFiles := append(ptr.files, files...)
+	sort.Slice(newFiles, func(i, j int) bool {
+		return newFiles[i].Base() < newFiles[j].Base()
+	})
+
+	// Reject overlapping files.
+	// Discard adjacent identical files.
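+	// (newFiles was sorted by Base above, so duplicate or overlapping
+	// files, if any, must now occupy adjacent positions.)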
+	out := newFiles[:0]
+	for i, file := range newFiles {
+		if i > 0 {
+			prev := newFiles[i-1]
+			if file == prev {
+				continue
+			}
+			if prev.Base()+prev.Size()+1 > file.Base() {
+				panic(fmt.Sprintf("file %s (%d-%d) overlaps with file %s (%d-%d)",
+					prev.Name(), prev.Base(), prev.Base()+prev.Size(),
+					file.Name(), file.Base(), file.Base()+file.Size()))
+			}
+		}
+		out = append(out, file)
+	}
+	newFiles = out
+
+	ptr.files = newFiles
+
+	// Advance FileSet.Base().
+	if len(newFiles) > 0 {
+		last := newFiles[len(newFiles)-1]
+		newBase := last.Base() + last.Size() + 1
+		if ptr.base < newBase {
+			ptr.base = newBase
+		}
+	}
+}
+
+// FileSetFor returns a new FileSet containing a sequence of new Files with
+// the same base, size, and line as the input files, for use in APIs that
+// require a FileSet.
+//
+// Precondition: the input files must be non-overlapping, and sorted in order
+// of their Base.
+func FileSetFor(files ...*token.File) *token.FileSet {
+	fset := token.NewFileSet()
+	for _, f := range files {
+		f2 := fset.AddFile(f.Name(), f.Base(), f.Size())
+		lines := GetLines(f)
+		f2.SetLines(lines)
+	}
+	return fset
+}
+
+// CloneFileSet creates a new FileSet holding all files in fset. It does not
+// create copies of the token.Files in fset: they are added to the resulting
+// FileSet unmodified.
+func CloneFileSet(fset *token.FileSet) *token.FileSet {
+	var files []*token.File
+	fset.Iterate(func(f *token.File) bool {
+		files = append(files, f)
+		return true
+	})
+	newFileSet := token.NewFileSet()
+	AddExistingFiles(newFileSet, files)
+	return newFileSet
+}
diff --git a/vendor/golang.org/x/tools/internal/typesinternal/types.go b/vendor/golang.org/x/tools/internal/typesinternal/types.go
index 3c53fbc63..ce7d4351b 100644
--- a/vendor/golang.org/x/tools/internal/typesinternal/types.go
+++ b/vendor/golang.org/x/tools/internal/typesinternal/types.go
@@ -11,8 +11,6 @@ import (
 	"go/types"
 	"reflect"
 	"unsafe"
-
-	"golang.org/x/tools/go/types/objectpath"
 )
 
 func SetUsesCgo(conf *types.Config) bool {
@@ -52,10 +50,3 @@ func ReadGo116ErrorData(err types.Error) (code ErrorCode, start, end token.Pos,
 }
 
 var SetGoVersion = func(conf *types.Config, version string) bool { return false }
-
-// NewObjectpathEncoder returns a function closure equivalent to
-// objectpath.For but amortized for multiple (sequential) calls.
-// It is a temporary workaround, pending the approval of proposal 58668.
-//
-//go:linkname NewObjectpathFunc golang.org/x/tools/go/types/objectpath.newEncoderFor
-func NewObjectpathFunc() func(types.Object) (objectpath.Path, error)
diff --git a/vendor/modules.txt b/vendor/modules.txt
index 36904beca..bac67ffdc 100644
--- a/vendor/modules.txt
+++ b/vendor/modules.txt
@@ -1,4 +1,7 @@
-# github.com/Azure/go-ansiterm v0.0.0-20210617225240-d185dfc1b5a1
+# dario.cat/mergo v1.0.0
+## explicit; go 1.13
+dario.cat/mergo
+# github.com/Azure/go-ansiterm v0.0.0-20230124172434-306776ec8161
 ## explicit; go 1.16
 github.com/Azure/go-ansiterm
 github.com/Azure/go-ansiterm/winterm
@@ -6,15 +9,17 @@ github.com/Azure/go-ansiterm/winterm
 ## explicit; go 1.16
 github.com/BurntSushi/toml
 github.com/BurntSushi/toml/internal
-# github.com/Microsoft/go-winio v0.6.0
+# github.com/Microsoft/go-winio v0.6.1
 ## explicit; go 1.17
 github.com/Microsoft/go-winio
 github.com/Microsoft/go-winio/backuptar
+github.com/Microsoft/go-winio/internal/fs
 github.com/Microsoft/go-winio/internal/socket
+github.com/Microsoft/go-winio/internal/stringbuffer
 github.com/Microsoft/go-winio/pkg/guid
 github.com/Microsoft/go-winio/tools/mkwinsyscall
 github.com/Microsoft/go-winio/vhd
-# github.com/Microsoft/hcsshim v0.10.0-rc.7
+# github.com/Microsoft/hcsshim v0.10.0-rc.8
 ## explicit; go 1.18
 github.com/Microsoft/hcsshim
 github.com/Microsoft/hcsshim/computestorage
@@ -64,7 +69,7 @@ github.com/container-orchestrated-devices/container-device-interface/specs-go
 # github.com/containerd/cgroups v1.1.0
 ## explicit; go 1.17
 github.com/containerd/cgroups/stats/v1
-# github.com/containerd/containerd v1.7.0
+# github.com/containerd/containerd v1.7.2
 ## explicit; go 1.19
 github.com/containerd/containerd/errdefs
 github.com/containerd/containerd/log
@@ -76,7 +81,7 @@ github.com/containerd/stargz-snapshotter/estargz/errorutil
 # github.com/containers/buildah v1.30.0
 ## explicit; go 1.18
 github.com/containers/buildah/define
-# github.com/containers/common v0.53.0
+# github.com/containers/common v0.54.0
 ## explicit; go 1.18
 github.com/containers/common/libimage
 github.com/containers/common/libimage/define
@@ -102,7 +107,7 @@ github.com/containers/common/pkg/supplemented
 github.com/containers/common/pkg/timetype
 github.com/containers/common/pkg/util
 github.com/containers/common/version
-# github.com/containers/image/v5 v5.25.0
+# github.com/containers/image/v5 v5.26.0
 ## explicit; go 1.18
 github.com/containers/image/v5/copy
 github.com/containers/image/v5/directory
@@ -234,8 +239,8 @@ github.com/containers/psgo/internal/dev
 github.com/containers/psgo/internal/host
 github.com/containers/psgo/internal/proc
 github.com/containers/psgo/internal/process
-# github.com/containers/storage v1.46.1
-## explicit; go 1.18
+# github.com/containers/storage v1.47.0
+## explicit; go 1.19
 github.com/containers/storage
 github.com/containers/storage/drivers
 github.com/containers/storage/drivers/aufs
@@ -288,7 +293,7 @@ github.com/coreos/go-systemd/v22/dbus
 github.com/coreos/go-systemd/v22/internal/dlopen
 github.com/coreos/go-systemd/v22/journal
 github.com/coreos/go-systemd/v22/sdjournal
-# github.com/cyberphone/json-canonicalization v0.0.0-20220623050100-57a0ce2678a7
+# github.com/cyberphone/json-canonicalization v0.0.0-20230514072755-504adb8a8af1
 ## explicit
 github.com/cyberphone/json-canonicalization/go/src/webpki.org/jsoncanonicalizer
 # github.com/cyphar/filepath-securejoin v0.2.3
@@ -384,7 +389,7 @@ github.com/gdamore/tcell/v2/terminfo/x/xfce
 github.com/gdamore/tcell/v2/terminfo/x/xterm
 github.com/gdamore/tcell/v2/terminfo/x/xterm_kitty
 github.com/gdamore/tcell/v2/terminfo/x/xterm_termite
-# github.com/go-logr/logr v1.2.3
+# github.com/go-logr/logr v1.2.4
 ## explicit; go 1.16
 github.com/go-logr/logr
 github.com/go-logr/logr/funcr
@@ -419,7 +424,7 @@ github.com/go-openapi/spec
 # github.com/go-openapi/strfmt v0.21.7
 ## explicit; go 1.19
 github.com/go-openapi/strfmt
-# github.com/go-openapi/swag v0.22.3
+# github.com/go-openapi/swag v0.22.4
 ## explicit; go 1.18
 github.com/go-openapi/swag
 # github.com/go-openapi/validate v0.22.1
@@ -454,14 +459,14 @@ github.com/google/go-cmp/cmp/internal/diff
 github.com/google/go-cmp/cmp/internal/flags
 github.com/google/go-cmp/cmp/internal/function
 github.com/google/go-cmp/cmp/internal/value
-# github.com/google/go-containerregistry v0.14.0
+# github.com/google/go-containerregistry v0.15.2
 ## explicit; go 1.18
 github.com/google/go-containerregistry/pkg/name
 # github.com/google/go-intervals v0.0.2
 ## explicit; go 1.12
 github.com/google/go-intervals/intervalset
-# github.com/google/pprof v0.0.0-20210407192527-94a9f03dee38
-## explicit; go 1.14
+# github.com/google/pprof v0.0.0-20230323073829-e72429f035bd
+## explicit; go 1.19
 github.com/google/pprof/profile
 # github.com/google/uuid v1.3.0
 ## explicit
@@ -481,9 +486,6 @@ github.com/hashicorp/go-multierror
 # github.com/hinshun/vt10x v0.0.0-20220301184237-5011da428d02
 ## explicit; go 1.14
 github.com/hinshun/vt10x
-# github.com/imdario/mergo v0.3.15
-## explicit; go 1.13
-github.com/imdario/mergo
 # github.com/inconshreveable/mousetrap v1.1.0
 ## explicit; go 1.18
 github.com/inconshreveable/mousetrap
@@ -496,7 +498,7 @@ github.com/josharian/intern
 # github.com/json-iterator/go v1.1.12
 ## explicit; go 1.12
 github.com/json-iterator/go
-# github.com/klauspost/compress v1.16.4
+# github.com/klauspost/compress v1.16.6
 ## explicit; go 1.18
 github.com/klauspost/compress
 github.com/klauspost/compress/flate
@@ -506,7 +508,7 @@ github.com/klauspost/compress/internal/cpuinfo
 github.com/klauspost/compress/internal/snapref
 github.com/klauspost/compress/zstd
 github.com/klauspost/compress/zstd/internal/xxhash
-# github.com/klauspost/pgzip v1.2.6-0.20220930104621-17e8dac29df8
+# github.com/klauspost/pgzip v1.2.6
 ## explicit
 github.com/klauspost/pgzip
 # github.com/kr/fs v0.1.0
@@ -548,7 +550,7 @@ github.com/mattn/go-shellwords
 # github.com/miekg/pkcs11 v1.1.1
 ## explicit; go 1.12
 github.com/miekg/pkcs11
-# github.com/mistifyio/go-zfs/v3 v3.0.0
+# github.com/mistifyio/go-zfs/v3 v3.0.1
 ## explicit; go 1.14
 github.com/mistifyio/go-zfs/v3
 # github.com/mitchellh/mapstructure v1.5.0
@@ -557,7 +559,7 @@ github.com/mitchellh/mapstructure
 # github.com/moby/sys/mountinfo v0.6.2
 ## explicit; go 1.16
 github.com/moby/sys/mountinfo
-# github.com/moby/term v0.0.0-20221120202655-abb19827d345
+# github.com/moby/term v0.5.0
 ## explicit; go 1.18
 github.com/moby/term
 github.com/moby/term/windows
@@ -580,7 +582,7 @@ github.com/nxadm/tail/winfile
 # github.com/oklog/ulid v1.3.1
 ## explicit
 github.com/oklog/ulid
-# github.com/onsi/ginkgo/v2 v2.9.2
+# github.com/onsi/ginkgo/v2 v2.11.0
 ## explicit; go 1.18
 github.com/onsi/ginkgo/v2
 github.com/onsi/ginkgo/v2/config
@@ -602,7 +604,7 @@ github.com/onsi/ginkgo/v2/internal/parallel_support
 github.com/onsi/ginkgo/v2/internal/testingtproxy
 github.com/onsi/ginkgo/v2/reporters
 github.com/onsi/ginkgo/v2/types
-# github.com/onsi/gomega v1.27.6
+# github.com/onsi/gomega v1.27.8
 ## explicit; go 1.18
 github.com/onsi/gomega
 github.com/onsi/gomega/format
@@ -617,11 +619,11 @@ github.com/onsi/gomega/types
 # github.com/opencontainers/go-digest v1.0.0
 ## explicit; go 1.13
 github.com/opencontainers/go-digest
-# github.com/opencontainers/image-spec v1.1.0-rc2.0.20221005185240-3a7f492d3f1b
-## explicit; go 1.17
+# github.com/opencontainers/image-spec v1.1.0-rc3
+## explicit; go 1.18
 github.com/opencontainers/image-spec/specs-go
 github.com/opencontainers/image-spec/specs-go/v1
-# github.com/opencontainers/runc v1.1.5 => github.com/opencontainers/runc v1.1.1-0.20220617142545-8b9452f75cbc
+# github.com/opencontainers/runc v1.1.7 => github.com/opencontainers/runc v1.1.1-0.20220617142545-8b9452f75cbc
 ## explicit; go 1.17
 github.com/opencontainers/runc/libcontainer/apparmor
 github.com/opencontainers/runc/libcontainer/cgroups
@@ -633,7 +635,7 @@ github.com/opencontainers/runc/libcontainer/devices
 github.com/opencontainers/runc/libcontainer/user
 github.com/opencontainers/runc/libcontainer/userns
 github.com/opencontainers/runc/libcontainer/utils
-# github.com/opencontainers/runtime-spec v1.1.0-rc.2
+# github.com/opencontainers/runtime-spec v1.1.0-rc.3
 ## explicit
 github.com/opencontainers/runtime-spec/specs-go
 # github.com/opencontainers/runtime-tools v0.9.1-0.20230317050512-e931285f4b69
@@ -673,14 +675,14 @@ github.com/rs/zerolog
 github.com/rs/zerolog/internal/cbor
 github.com/rs/zerolog/internal/json
 github.com/rs/zerolog/log
-# github.com/sigstore/fulcio v1.2.0
+# github.com/sigstore/fulcio v1.3.1
 ## explicit; go 1.20
 github.com/sigstore/fulcio/pkg/certificate
-# github.com/sigstore/rekor v1.2.0
+# github.com/sigstore/rekor v1.2.2-0.20230601122533-4c81ff246d12
 ## explicit; go 1.19
 github.com/sigstore/rekor/pkg/generated/models
-# github.com/sigstore/sigstore v1.6.4
-## explicit; go 1.18
+# github.com/sigstore/sigstore v1.7.1
+## explicit; go 1.19
 github.com/sigstore/sigstore/pkg/cryptoutils
 github.com/sigstore/sigstore/pkg/signature
 github.com/sigstore/sigstore/pkg/signature/options
@@ -697,7 +699,7 @@ github.com/spf13/pflag
 # github.com/stefanberger/go-pkcs11uri v0.0.0-20201008174630-78d3cae3a980
 ## explicit
 github.com/stefanberger/go-pkcs11uri
-# github.com/sylabs/sif/v2 v2.11.1
+# github.com/sylabs/sif/v2 v2.11.5
 ## explicit; go 1.19
 github.com/sylabs/sif/v2/pkg/sif
 # github.com/syndtr/gocapability v0.0.0-20200815063812-42c35b437635
 ## explicit
@@ -723,7 +725,7 @@ github.com/ulikunitz/xz/lzma
 github.com/vbatts/tar-split/archive/tar
 github.com/vbatts/tar-split/tar/asm
 github.com/vbatts/tar-split/tar/storage
-# github.com/vbauerster/mpb/v8 v8.3.0
+# github.com/vbauerster/mpb/v8 v8.4.0
 ## explicit; go 1.17
 github.com/vbauerster/mpb/v8
 github.com/vbauerster/mpb/v8/cwriter
@@ -777,15 +779,15 @@ golang.org/x/crypto/ssh
 golang.org/x/crypto/ssh/agent
 golang.org/x/crypto/ssh/internal/bcrypt_pbkdf
 golang.org/x/crypto/ssh/knownhosts
-# golang.org/x/exp v0.0.0-20230321023759-10a507213a29
-## explicit; go 1.18
+# golang.org/x/exp v0.0.0-20230522175609-2e198f4a06a1
+## explicit; go 1.20
 golang.org/x/exp/constraints
 golang.org/x/exp/maps
 golang.org/x/exp/slices
 # golang.org/x/mod v0.10.0
 ## explicit; go 1.17
 golang.org/x/mod/semver
-# golang.org/x/net v0.10.0
+# golang.org/x/net v0.11.0
 ## explicit; go 1.17
 golang.org/x/net/context
 golang.org/x/net/html
@@ -799,8 +801,8 @@ golang.org/x/net/internal/socks
 golang.org/x/net/internal/timeseries
 golang.org/x/net/proxy
 golang.org/x/net/trace
-# golang.org/x/sync v0.2.0
-## explicit
+# golang.org/x/sync v0.3.0
+## explicit; go 1.17
 golang.org/x/sync/errgroup
 golang.org/x/sync/semaphore
 # golang.org/x/sys v0.9.0
@@ -836,18 +838,18 @@ golang.org/x/text/secure/bidirule
 golang.org/x/text/transform
 golang.org/x/text/unicode/bidi
 golang.org/x/text/unicode/norm
-# golang.org/x/tools v0.7.0
+# golang.org/x/tools v0.9.3
 ## explicit; go 1.18
 golang.org/x/tools/cmd/stringer
 golang.org/x/tools/go/ast/inspector
 golang.org/x/tools/go/gcexportdata
 golang.org/x/tools/go/internal/packagesdriver
 golang.org/x/tools/go/packages
-golang.org/x/tools/go/types/objectpath
 golang.org/x/tools/internal/event
 golang.org/x/tools/internal/event/core
 golang.org/x/tools/internal/event/keys
 golang.org/x/tools/internal/event/label
+golang.org/x/tools/internal/event/tag
 golang.org/x/tools/internal/gcimporter
 golang.org/x/tools/internal/gocommand
 golang.org/x/tools/internal/packagesinternal