From efdfbe48d6f57761da3d872fd6c52a3232a67228 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Thu, 29 Jun 2023 20:23:42 +0000 Subject: [PATCH] Bump github.com/containers/storage from 1.46.1 to 1.48.0 Bumps [github.com/containers/storage](https://github.com/containers/storage) from 1.46.1 to 1.48.0. - [Release notes](https://github.com/containers/storage/releases) - [Changelog](https://github.com/containers/storage/blob/main/docs/containers-storage-changes.md) - [Commits](https://github.com/containers/storage/compare/v1.46.1...v1.48.0) --- updated-dependencies: - dependency-name: github.com/containers/storage dependency-type: direct:production update-type: version-update:semver-minor ... Signed-off-by: dependabot[bot] --- go.mod | 14 +- go.sum | 25 +- .../Microsoft/go-winio/.golangci.yml | 27 +- .../github.com/Microsoft/go-winio/hvsock.go | 6 +- .../Microsoft/go-winio/internal/fs/doc.go | 2 + .../Microsoft/go-winio/internal/fs/fs.go | 202 +++ .../go-winio/internal/fs/security.go | 12 + .../go-winio/internal/fs/zsyscall_windows.go | 64 + .../go-winio/internal/socket/socket.go | 4 +- .../go-winio/internal/stringbuffer/wstring.go | 132 ++ vendor/github.com/Microsoft/go-winio/pipe.go | 22 +- .../tools/mkwinsyscall/mkwinsyscall.go | 85 +- .../Microsoft/go-winio/zsyscall_windows.go | 19 - .../github.com/containers/storage/.cirrus.yml | 17 +- .../containers/storage/.golangci.yml | 64 +- vendor/github.com/containers/storage/Makefile | 47 +- vendor/github.com/containers/storage/VERSION | 2 +- vendor/github.com/containers/storage/check.go | 1153 +++++++++++++++++ .../containers/storage/containers.go | 24 +- .../containers/storage/drivers/aufs/aufs.go | 25 +- .../containers/storage/drivers/btrfs/btrfs.go | 50 +- .../storage/drivers/chown_windows.go | 3 +- .../storage/drivers/copy/copy_linux.go | 1 + .../containers/storage/drivers/counter.go | 2 +- .../storage/drivers/devmapper/device_setup.go | 6 +- .../storage/drivers/devmapper/deviceset.go | 60 +- .../storage/drivers/devmapper/driver.go | 15 +- .../containers/storage/drivers/driver.go | 15 +- .../storage/drivers/driver_darwin.go | 10 +- .../storage/drivers/driver_freebsd.go | 5 +- .../storage/drivers/driver_linux.go | 8 +- .../storage/drivers/driver_solaris.go | 7 +- .../storage/drivers/driver_unsupported.go | 10 +- .../storage/drivers/driver_windows.go | 10 +- .../containers/storage/drivers/fsdiff.go | 11 +- .../storage/drivers/overlay/check.go | 47 +- .../storage/drivers/overlay/mount.go | 2 +- .../storage/drivers/overlay/overlay.go | 244 ++-- .../storage/drivers/quota/projectquota.go | 28 +- .../drivers/quota/projectquota_unsupported.go | 3 +- .../containers/storage/drivers/template.go | 1 + .../containers/storage/drivers/vfs/driver.go | 47 +- .../storage/drivers/windows/windows.go | 9 +- .../containers/storage/drivers/zfs/zfs.go | 13 +- .../github.com/containers/storage/images.go | 16 +- .../github.com/containers/storage/layers.go | 225 ++-- .../containers/storage/pkg/archive/archive.go | 55 +- .../storage/pkg/archive/archive_linux.go | 5 +- .../storage/pkg/archive/archive_unix.go | 2 +- .../storage/pkg/archive/archive_windows.go | 7 +- .../containers/storage/pkg/archive/changes.go | 14 +- .../storage/pkg/archive/changes_linux.go | 1 - .../storage/pkg/archive/changes_windows.go | 1 - .../containers/storage/pkg/archive/copy.go | 1 - .../containers/storage/pkg/archive/diff.go | 2 +- .../storage/pkg/archive/fflags_bsd.go | 2 +- .../storage/pkg/chrootarchive/archive.go | 2 +- 
.../pkg/chrootarchive/archive_darwin.go | 3 +- .../storage/pkg/chrootarchive/archive_unix.go | 4 +- .../pkg/chrootarchive/archive_windows.go | 3 +- .../storage/pkg/chrootarchive/diff_unix.go | 1 - .../storage/pkg/chunked/cache_linux.go | 40 +- .../storage/pkg/chunked/compression_linux.go | 123 +- .../pkg/chunked/compressor/compressor.go | 78 +- .../storage/pkg/chunked/compressor/rollsum.go | 12 +- .../pkg/chunked/internal/compression.go | 32 +- .../storage/pkg/chunked/storage_linux.go | 82 +- .../pkg/chunked/storage_unsupported.go | 9 +- .../containers/storage/pkg/config/config.go | 5 + .../storage/pkg/devicemapper/devmapper.go | 12 +- .../pkg/devicemapper/devmapper_wrapper.go | 15 +- .../storage/pkg/fileutils/fileutils.go | 7 +- .../storage/pkg/idmap/idmapped_utils.go | 4 +- .../storage/pkg/idtools/idtools_unix.go | 6 +- .../storage/pkg/idtools/usergroupadd_linux.go | 1 - .../storage/pkg/idtools/utils_unix.go | 4 +- .../storage/pkg/lockfile/lockfile_unix.go | 2 +- .../storage/pkg/lockfile/lockfile_windows.go | 1 + .../storage/pkg/loopback/attach_loopback.go | 7 +- .../pkg/parsers/kernel/kernel_windows.go | 1 - .../containers/storage/pkg/regexp/regexp.go | 98 +- .../storage/pkg/stringid/stringid.go | 2 +- .../containers/storage/pkg/system/errors.go | 6 +- .../storage/pkg/system/init_windows.go | 1 - .../storage/pkg/system/meminfo_solaris.go | 1 - .../containers/storage/pkg/system/path.go | 1 - .../containers/storage/pkg/system/rm.go | 6 + .../storage/pkg/system/stat_common.go | 3 +- .../storage/pkg/system/stat_darwin.go | 6 +- .../storage/pkg/system/stat_freebsd.go | 6 +- .../storage/pkg/system/stat_linux.go | 6 +- .../storage/pkg/system/stat_openbsd.go | 6 +- .../storage/pkg/system/stat_solaris.go | 6 +- .../storage/pkg/system/stat_windows.go | 3 +- .../storage/pkg/truncindex/truncindex.go | 4 +- .../storage/pkg/unshare/unshare_freebsd.go | 2 +- .../storage/pkg/unshare/unshare_linux.go | 2 +- .../containers/storage/storage.conf | 11 +- vendor/github.com/containers/storage/store.go | 1021 ++++++++------- .../containers/storage/types/errors.go | 37 + .../containers/storage/types/options.go | 51 +- .../storage/types/options_darwin.go | 9 +- .../storage/types/options_freebsd.go | 5 + .../containers/storage/types/options_linux.go | 38 + .../storage/types/options_windows.go | 5 + .../storage/types/storage_test.conf | 10 + .../containers/storage/types/utils.go | 11 +- .../github.com/klauspost/compress/README.md | 13 + .../github.com/klauspost/compress/SECURITY.md | 25 + .../klauspost/compress/flate/deflate.go | 5 +- .../compress/flate/huffman_bit_writer.go | 5 - .../compress/flate/huffman_sortByFreq.go | 19 - .../klauspost/compress/huff0/bitwriter.go | 8 - .../klauspost/compress/huff0/decompress.go | 2 +- .../compress/internal/snapref/encode_other.go | 12 - .../klauspost/compress/zstd/README.md | 2 +- .../klauspost/compress/zstd/blockdec.go | 2 +- .../klauspost/compress/zstd/bytebuf.go | 2 +- .../compress/zstd/decoder_options.go | 2 +- .../klauspost/compress/zstd/enc_fast.go | 6 +- .../compress/zstd/encoder_options.go | 2 +- .../klauspost/compress/zstd/framedec.go | 8 +- .../klauspost/compress/zstd/matchlen_amd64.go | 16 + .../klauspost/compress/zstd/matchlen_amd64.s | 68 + .../compress/zstd/matchlen_generic.go | 33 + .../klauspost/compress/zstd/zstd.go | 22 - .../github.com/mistifyio/go-zfs/v3/utils.go | 9 +- .../mistifyio/go-zfs/v3/utils_notsolaris.go | 2 +- .../mistifyio/go-zfs/v3/utils_solaris.go | 2 +- .../github.com/mistifyio/go-zfs/v3/zpool.go | 3 - .../runtime-spec/specs-go/config.go | 
88 ++ .../runtime-spec/specs-go/version.go | 2 +- vendor/modules.txt | 18 +- 133 files changed, 3669 insertions(+), 1402 deletions(-) create mode 100644 vendor/github.com/Microsoft/go-winio/internal/fs/doc.go create mode 100644 vendor/github.com/Microsoft/go-winio/internal/fs/fs.go create mode 100644 vendor/github.com/Microsoft/go-winio/internal/fs/security.go create mode 100644 vendor/github.com/Microsoft/go-winio/internal/fs/zsyscall_windows.go create mode 100644 vendor/github.com/Microsoft/go-winio/internal/stringbuffer/wstring.go create mode 100644 vendor/github.com/containers/storage/check.go create mode 100644 vendor/github.com/klauspost/compress/SECURITY.md create mode 100644 vendor/github.com/klauspost/compress/zstd/matchlen_amd64.go create mode 100644 vendor/github.com/klauspost/compress/zstd/matchlen_amd64.s create mode 100644 vendor/github.com/klauspost/compress/zstd/matchlen_generic.go diff --git a/go.mod b/go.mod index 9bcdc7edb..04b75aae4 100644 --- a/go.mod +++ b/go.mod @@ -7,7 +7,7 @@ require ( github.com/containers/buildah v1.30.0 github.com/containers/common v0.53.0 github.com/containers/podman/v4 v4.5.1 - github.com/containers/storage v1.46.1 + github.com/containers/storage v1.48.0 github.com/docker/distribution v2.8.2+incompatible github.com/docker/docker v24.0.2+incompatible github.com/docker/go-units v0.5.0 @@ -27,7 +27,7 @@ require ( require ( github.com/Azure/go-ansiterm v0.0.0-20210617225240-d185dfc1b5a1 // indirect - github.com/Microsoft/go-winio v0.6.0 // indirect + github.com/Microsoft/go-winio v0.6.1 // indirect github.com/Microsoft/hcsshim v0.10.0-rc.7 // indirect github.com/VividCortex/ewma v1.2.0 // indirect github.com/acarl005/stripansi v0.0.0-20180116102854-5a71ef0e047d // indirect @@ -79,8 +79,8 @@ require ( github.com/jinzhu/copier v0.3.5 // indirect github.com/josharian/intern v1.0.0 // indirect github.com/json-iterator/go v1.1.12 // indirect - github.com/klauspost/compress v1.16.4 // indirect - github.com/klauspost/pgzip v1.2.6-0.20220930104621-17e8dac29df8 // indirect + github.com/klauspost/compress v1.16.6 // indirect + github.com/klauspost/pgzip v1.2.6 // indirect github.com/kr/fs v0.1.0 // indirect github.com/letsencrypt/boulder v0.0.0-20230213213521-fdfea0d469b6 // indirect github.com/lucasb-eyer/go-colorful v1.2.0 // indirect @@ -91,7 +91,7 @@ require ( github.com/mattn/go-runewidth v0.0.14 // indirect github.com/mattn/go-shellwords v1.0.12 // indirect github.com/miekg/pkcs11 v1.1.1 // indirect - github.com/mistifyio/go-zfs/v3 v3.0.0 // indirect + github.com/mistifyio/go-zfs/v3 v3.0.1 // indirect github.com/mitchellh/mapstructure v1.5.0 // indirect github.com/moby/sys/mountinfo v0.6.2 // indirect github.com/moby/term v0.0.0-20221120202655-abb19827d345 // indirect @@ -101,8 +101,8 @@ require ( github.com/oklog/ulid v1.3.1 // indirect github.com/opencontainers/go-digest v1.0.0 // indirect github.com/opencontainers/image-spec v1.1.0-rc2.0.20221005185240-3a7f492d3f1b // indirect - github.com/opencontainers/runc v1.1.5 // indirect - github.com/opencontainers/runtime-spec v1.1.0-rc.2 // indirect + github.com/opencontainers/runc v1.1.7 // indirect + github.com/opencontainers/runtime-spec v1.1.0-rc.3 // indirect github.com/opencontainers/runtime-tools v0.9.1-0.20230317050512-e931285f4b69 // indirect github.com/opencontainers/selinux v1.11.0 // indirect github.com/ostreedev/ostree-go v0.0.0-20210805093236-719684c64e4f // indirect diff --git a/go.sum b/go.sum index 17472f4d4..dd137d16b 100644 --- a/go.sum +++ b/go.sum @@ -53,8 +53,8 @@ 
github.com/Microsoft/go-winio v0.4.17-0.20210211115548-6eac466e5fa3/go.mod h1:JP github.com/Microsoft/go-winio v0.4.17-0.20210324224401-5516f17a5958/go.mod h1:JPGBdM1cNvN/6ISo+n8V5iA4v8pBzdOpzfwIujj1a84= github.com/Microsoft/go-winio v0.4.17/go.mod h1:JPGBdM1cNvN/6ISo+n8V5iA4v8pBzdOpzfwIujj1a84= github.com/Microsoft/go-winio v0.5.2/go.mod h1:WpS1mjBmmwHBEWmogvA2mj8546UReBk4v8QkMxJ6pZY= -github.com/Microsoft/go-winio v0.6.0 h1:slsWYD/zyx7lCXoZVlvQrj0hPTM1HI4+v1sIda2yDvg= -github.com/Microsoft/go-winio v0.6.0/go.mod h1:cTAf44im0RAYeL23bpB+fzCyDH2MJiz2BO69KH/soAE= +github.com/Microsoft/go-winio v0.6.1 h1:9/kr64B9VUZrLm5YYwbGtUJnMgqWVOdUAXu6Migciow= +github.com/Microsoft/go-winio v0.6.1/go.mod h1:LRdKpFKfdobln8UmuiYcKPot9D2v6svN5+sAH+4kjUM= github.com/Microsoft/hcsshim v0.8.6/go.mod h1:Op3hHsoHPAvb6lceZHDtd9OkTew38wNoXnJs8iY7rUg= github.com/Microsoft/hcsshim v0.8.7-0.20190325164909-8abdbb8205e4/go.mod h1:Op3hHsoHPAvb6lceZHDtd9OkTew38wNoXnJs8iY7rUg= github.com/Microsoft/hcsshim v0.8.7/go.mod h1:OHd7sQqRFrYd3RmSgbgji+ctCwkbq2wbEYNSzOYtcBQ= @@ -244,8 +244,8 @@ github.com/containers/podman/v4 v4.5.1/go.mod h1:BoNmT1QNzMtDMUCiJ1j1ZoDx6OOn5BA github.com/containers/psgo v1.8.0 h1:2loGekmGAxM9ir5OsXWEfGwFxorMPYnc6gEDsGFQvhY= github.com/containers/psgo v1.8.0/go.mod h1:T8ZxnX3Ur4RvnhxFJ7t8xJ1F48RhiZB4rSrOaR/qGHc= github.com/containers/storage v1.43.0/go.mod h1:uZ147thiIFGdVTjMmIw19knttQnUCl3y9zjreHrg11s= -github.com/containers/storage v1.46.1 h1:GcAe8J0Y6T2CF70fXPojUpnme6zXamuzGCrNujVtIGE= -github.com/containers/storage v1.46.1/go.mod h1:81vNDX4h+nXJ2o0D6Yqy6JGXDYJGVpHZpz0nr09iJuQ= +github.com/containers/storage v1.48.0 h1:wiPs8J2xiFoOEAhxHDRtP6A90Jzj57VqzLRXOqeizns= +github.com/containers/storage v1.48.0/go.mod h1:pRp3lkRo2qodb/ltpnudoXggrviRmaCmU5a5GhTBae0= github.com/coreos/bbolt v1.3.2/go.mod h1:iRUV2dpdMOn7Bo10OQBFzIJO9kkE559Wcmn+qkEiiKk= github.com/coreos/etcd v3.3.10+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE= github.com/coreos/go-iptables v0.4.5/go.mod h1:/mVI274lEDI2ns62jHCDnCyBF9Iwsmekav8Dbxlm1MU= @@ -596,11 +596,11 @@ github.com/klauspost/compress v1.11.13/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdY github.com/klauspost/compress v1.13.6/go.mod h1:/3/Vjq9QcHkK5uEr5lBEmyoZ1iFhe47etQ6QUkpK6sk= github.com/klauspost/compress v1.15.7/go.mod h1:PhcZ0MbTNciWF3rruxRgKxI5NkcHHrHUDtV4Yw2GlzU= github.com/klauspost/compress v1.15.9/go.mod h1:PhcZ0MbTNciWF3rruxRgKxI5NkcHHrHUDtV4Yw2GlzU= -github.com/klauspost/compress v1.16.4 h1:91KN02FnsOYhuunwU4ssRe8lc2JosWmizWa91B5v1PU= -github.com/klauspost/compress v1.16.4/go.mod h1:ntbaceVETuRiXiv4DpjP66DpAtAGkEQskQzEyD//IeE= +github.com/klauspost/compress v1.16.6 h1:91SKEy4K37vkp255cJ8QesJhjyRO0hn9i9G0GoUwLsk= +github.com/klauspost/compress v1.16.6/go.mod h1:ntbaceVETuRiXiv4DpjP66DpAtAGkEQskQzEyD//IeE= github.com/klauspost/pgzip v1.2.5/go.mod h1:Ch1tH69qFZu15pkjo5kYi6mth2Zzwzt50oCQKQE9RUs= -github.com/klauspost/pgzip v1.2.6-0.20220930104621-17e8dac29df8 h1:BcxbplxjtczA1a6d3wYoa7a0WL3rq9DKBMGHeKyjEF0= -github.com/klauspost/pgzip v1.2.6-0.20220930104621-17e8dac29df8/go.mod h1:Ch1tH69qFZu15pkjo5kYi6mth2Zzwzt50oCQKQE9RUs= +github.com/klauspost/pgzip v1.2.6 h1:8RXeL5crjEUFnR2/Sn6GJNWtSQ3Dk8pq4CL3jvdDyjU= +github.com/klauspost/pgzip v1.2.6/go.mod h1:Ch1tH69qFZu15pkjo5kYi6mth2Zzwzt50oCQKQE9RUs= github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= github.com/konsorten/go-windows-terminal-sequences v1.0.2/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= 
github.com/konsorten/go-windows-terminal-sequences v1.0.3/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= @@ -659,8 +659,9 @@ github.com/miekg/pkcs11 v1.0.3/go.mod h1:XsNlhZGX73bx86s2hdc/FuaLm2CPZJemRLMA+WT github.com/miekg/pkcs11 v1.1.1 h1:Ugu9pdy6vAYku5DEpVWVFPYnzV+bxB+iRdbuFSu7TvU= github.com/miekg/pkcs11 v1.1.1/go.mod h1:XsNlhZGX73bx86s2hdc/FuaLm2CPZJemRLMA+WTFxgs= github.com/mistifyio/go-zfs v2.1.2-0.20190413222219-f784269be439+incompatible/go.mod h1:8AuVvqP/mXw1px98n46wfvcGfQ4ci2FwoAjKYxuo3Z4= -github.com/mistifyio/go-zfs/v3 v3.0.0 h1:J5QK618xRcXnQYZ2GE5FdmpS1ufIrWue+lR/mpe6/14= github.com/mistifyio/go-zfs/v3 v3.0.0/go.mod h1:CzVgeB0RvF2EGzQnytKVvVSDwmKJXxkOTUGbNrTja/k= +github.com/mistifyio/go-zfs/v3 v3.0.1 h1:YaoXgBePoMA12+S1u/ddkv+QqxcfiZK4prI6HPnkFiU= +github.com/mistifyio/go-zfs/v3 v3.0.1/go.mod h1:CzVgeB0RvF2EGzQnytKVvVSDwmKJXxkOTUGbNrTja/k= github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0= github.com/mitchellh/mapstructure v1.1.2/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y= github.com/mitchellh/mapstructure v1.3.3/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= @@ -741,8 +742,8 @@ github.com/opencontainers/runtime-spec v1.0.2-0.20190207185410-29686dbc5559/go.m github.com/opencontainers/runtime-spec v1.0.2/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0= github.com/opencontainers/runtime-spec v1.0.3-0.20200929063507-e6143ca7d51d/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0= github.com/opencontainers/runtime-spec v1.0.3-0.20210326190908-1c3f411f0417/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0= -github.com/opencontainers/runtime-spec v1.1.0-rc.2 h1:ucBtEms2tamYYW/SvGpvq9yUN0NEVL6oyLEwDcTSrk8= -github.com/opencontainers/runtime-spec v1.1.0-rc.2/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0= +github.com/opencontainers/runtime-spec v1.1.0-rc.3 h1:l04uafi6kxByhbxev7OWiuUv0LZxEsYUfDWZ6bztAuU= +github.com/opencontainers/runtime-spec v1.1.0-rc.3/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0= github.com/opencontainers/runtime-tools v0.0.0-20181011054405-1d69bd0f9c39/go.mod h1:r3f7wjNzSs2extwzU3Y+6pKfobzPh+kKFJ3ofN+3nfs= github.com/opencontainers/runtime-tools v0.9.1-0.20230317050512-e931285f4b69 h1:NL4xDvl68WWqQ+8WPMM3l5PsZTxaT7Z4K3VSKDRuAGs= github.com/opencontainers/runtime-tools v0.9.1-0.20230317050512-e931285f4b69/go.mod h1:bNpfuSHA3DZRtD0TPWO8LzgtLpFPTVA/3jDkzD/OPyk= @@ -884,7 +885,7 @@ github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/ github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= -github.com/stretchr/testify v1.8.2 h1:+h33VjcLVPDHtOdpUCuF+7gSuG3yGIftsP1YvFihtJ8= +github.com/stretchr/testify v1.8.4 h1:CcVxjf3Q8PM0mHUKJCdn+eZZtm5yQwehR5yeSVQQcUk= github.com/sylabs/sif/v2 v2.11.1 h1:d09yPukVa8b74wuy+QTA4Is3w8MH0UjO/xlWQUuFzpY= github.com/sylabs/sif/v2 v2.11.1/go.mod h1:i4GcKLOaT4ertznbsuf11d/G9zLEfUZa7YhrFc5L6YQ= github.com/syndtr/gocapability v0.0.0-20170704070218-db04d3cc01c8/go.mod h1:hkRG7XYTFWNJGYcbNJQlaLq0fg1yr4J4t/NcTQtrfww= diff --git a/vendor/github.com/Microsoft/go-winio/.golangci.yml b/vendor/github.com/Microsoft/go-winio/.golangci.yml index af403bb13..7b503d26a 100644 --- a/vendor/github.com/Microsoft/go-winio/.golangci.yml +++ b/vendor/github.com/Microsoft/go-winio/.golangci.yml @@ -8,12 +8,8 @@ linters: - 
containedctx # struct contains a context - dupl # duplicate code - errname # errors are named correctly - goconst # strings that should be constants - godot # comments end in a period - misspell - nolintlint # "//nolint" directives are properly explained - revive # golint replacement - stylecheck # golint replacement, less configurable than revive - unconvert # unnecessary conversions - wastedassign @@ -23,10 +19,7 @@ - exhaustive # check exhaustiveness of enum switch statements - gofmt # files are gofmt'ed - gosec # security - nestif # deeply nested ifs - nilerr # returns nil even with non-nil error - prealloc # slices that can be pre-allocated - structcheck # unused struct fields - unparam # unused function params issues: @@ -42,6 +35,18 @@ text: "^line-length-limit: " source: "^//(go:generate|sys) " + #TODO: remove after upgrading to go1.18 + # ignore comment spacing for nolint and sys directives + - linters: + - revive + text: "^comment-spacings: no space between comment delimiter and comment text" + source: "//(cspell:|nolint:|sys |todo)" + + # not on go 1.18 yet, so no any + - linters: + - revive + text: "^use-any: since GO 1.18 'interface{}' can be replaced by 'any'" + # allow unjustified ignores of error checks in defer statements - linters: - nolintlint @@ -56,6 +61,8 @@ linters-settings: + exhaustive: + default-signifies-exhaustive: true govet: enable-all: true disable: @@ -98,6 +105,8 @@ linters-settings: disabled: true - name: flag-parameter # excessive, and a common idiom we use disabled: true + - name: unhandled-error # warns over common fmt.Print* and io.Close; rely on errcheck instead + disabled: true # general config - name: line-length-limit arguments: @@ -138,7 +147,3 @@ - VPCI - WCOW - WIM - stylecheck: - checks: - - "all" - - "-ST1003" # use revive's var naming diff --git a/vendor/github.com/Microsoft/go-winio/hvsock.go b/vendor/github.com/Microsoft/go-winio/hvsock.go index 52f1c280f..c88191658 100644 --- a/vendor/github.com/Microsoft/go-winio/hvsock.go +++ b/vendor/github.com/Microsoft/go-winio/hvsock.go @@ -23,7 +23,7 @@ import ( const afHVSock = 34 // AF_HYPERV // Well known Service and VM IDs -//https://docs.microsoft.com/en-us/virtualization/hyper-v-on-windows/user-guide/make-integration-service#vmid-wildcards +// https://docs.microsoft.com/en-us/virtualization/hyper-v-on-windows/user-guide/make-integration-service#vmid-wildcards // HvsockGUIDWildcard is the wildcard VmId for accepting connections from all partitions. func HvsockGUIDWildcard() guid.GUID { // 00000000-0000-0000-0000-000000000000 @@ -31,7 +31,7 @@ func HvsockGUIDWildcard() guid.GUID { // 00000000-0000-0000-0000-000000000000 } // HvsockGUIDBroadcast is the wildcard VmId for broadcasting sends to all partitions.
-func HvsockGUIDBroadcast() guid.GUID { //ffffffff-ffff-ffff-ffff-ffffffffffff +func HvsockGUIDBroadcast() guid.GUID { // ffffffff-ffff-ffff-ffff-ffffffffffff return guid.GUID{ Data1: 0xffffffff, Data2: 0xffff, @@ -246,7 +246,7 @@ func (l *HvsockListener) Accept() (_ net.Conn, err error) { var addrbuf [addrlen * 2]byte var bytes uint32 - err = syscall.AcceptEx(l.sock.handle, sock.handle, &addrbuf[0], 0 /*rxdatalen*/, addrlen, addrlen, &bytes, &c.o) + err = syscall.AcceptEx(l.sock.handle, sock.handle, &addrbuf[0], 0 /* rxdatalen */, addrlen, addrlen, &bytes, &c.o) if _, err = l.sock.asyncIO(c, nil, bytes, err); err != nil { return nil, l.opErr("accept", os.NewSyscallError("acceptex", err)) } diff --git a/vendor/github.com/Microsoft/go-winio/internal/fs/doc.go b/vendor/github.com/Microsoft/go-winio/internal/fs/doc.go new file mode 100644 index 000000000..1f6538817 --- /dev/null +++ b/vendor/github.com/Microsoft/go-winio/internal/fs/doc.go @@ -0,0 +1,2 @@ +// This package contains Win32 filesystem functionality. +package fs diff --git a/vendor/github.com/Microsoft/go-winio/internal/fs/fs.go b/vendor/github.com/Microsoft/go-winio/internal/fs/fs.go new file mode 100644 index 000000000..509b3ec64 --- /dev/null +++ b/vendor/github.com/Microsoft/go-winio/internal/fs/fs.go @@ -0,0 +1,202 @@ +//go:build windows + +package fs + +import ( + "golang.org/x/sys/windows" + + "github.com/Microsoft/go-winio/internal/stringbuffer" +) + +//go:generate go run github.com/Microsoft/go-winio/tools/mkwinsyscall -output zsyscall_windows.go fs.go + +// https://learn.microsoft.com/en-us/windows/win32/api/fileapi/nf-fileapi-createfilew +//sys CreateFile(name string, access AccessMask, mode FileShareMode, sa *syscall.SecurityAttributes, createmode FileCreationDisposition, attrs FileFlagOrAttribute, templatefile windows.Handle) (handle windows.Handle, err error) [failretval==windows.InvalidHandle] = CreateFileW + +const NullHandle windows.Handle = 0 + +// AccessMask defines standard, specific, and generic rights. +// +// Bitmask: +// 3 3 2 2 2 2 2 2 2 2 2 2 1 1 1 1 1 1 1 1 1 1 +// 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0 +// +---------------+---------------+-------------------------------+ +// |G|G|G|G|Resvd|A| StandardRights| SpecificRights | +// |R|W|E|A| |S| | | +// +-+-------------+---------------+-------------------------------+ +// +// GR Generic Read +// GW Generic Write +// GE Generic Execute +// GA Generic All +// Resvd Reserved +// AS Access Security System +// +// https://learn.microsoft.com/en-us/windows/win32/secauthz/access-mask +// +// https://learn.microsoft.com/en-us/windows/win32/secauthz/generic-access-rights +// +// https://learn.microsoft.com/en-us/windows/win32/fileio/file-access-rights-constants +type AccessMask = windows.ACCESS_MASK + +//nolint:revive // SNAKE_CASE is not idiomatic in Go, but aligned with Win32 API. +const ( + // Not actually any.
+ // + // For CreateFile: "query certain metadata such as file, directory, or device attributes without accessing that file or device" + // https://learn.microsoft.com/en-us/windows/win32/api/fileapi/nf-fileapi-createfilew#parameters + FILE_ANY_ACCESS AccessMask = 0 + + // Specific Object Access + // from ntioapi.h + + FILE_READ_DATA AccessMask = (0x0001) // file & pipe + FILE_LIST_DIRECTORY AccessMask = (0x0001) // directory + + FILE_WRITE_DATA AccessMask = (0x0002) // file & pipe + FILE_ADD_FILE AccessMask = (0x0002) // directory + + FILE_APPEND_DATA AccessMask = (0x0004) // file + FILE_ADD_SUBDIRECTORY AccessMask = (0x0004) // directory + FILE_CREATE_PIPE_INSTANCE AccessMask = (0x0004) // named pipe + + FILE_READ_EA AccessMask = (0x0008) // file & directory + FILE_READ_PROPERTIES AccessMask = FILE_READ_EA + + FILE_WRITE_EA AccessMask = (0x0010) // file & directory + FILE_WRITE_PROPERTIES AccessMask = FILE_WRITE_EA + + FILE_EXECUTE AccessMask = (0x0020) // file + FILE_TRAVERSE AccessMask = (0x0020) // directory + + FILE_DELETE_CHILD AccessMask = (0x0040) // directory + + FILE_READ_ATTRIBUTES AccessMask = (0x0080) // all + + FILE_WRITE_ATTRIBUTES AccessMask = (0x0100) // all + + FILE_ALL_ACCESS AccessMask = (STANDARD_RIGHTS_REQUIRED | SYNCHRONIZE | 0x1FF) + FILE_GENERIC_READ AccessMask = (STANDARD_RIGHTS_READ | FILE_READ_DATA | FILE_READ_ATTRIBUTES | FILE_READ_EA | SYNCHRONIZE) + FILE_GENERIC_WRITE AccessMask = (STANDARD_RIGHTS_WRITE | FILE_WRITE_DATA | FILE_WRITE_ATTRIBUTES | FILE_WRITE_EA | FILE_APPEND_DATA | SYNCHRONIZE) + FILE_GENERIC_EXECUTE AccessMask = (STANDARD_RIGHTS_EXECUTE | FILE_READ_ATTRIBUTES | FILE_EXECUTE | SYNCHRONIZE) + + SPECIFIC_RIGHTS_ALL AccessMask = 0x0000FFFF + + // Standard Access + // from ntseapi.h + + DELETE AccessMask = 0x0001_0000 + READ_CONTROL AccessMask = 0x0002_0000 + WRITE_DAC AccessMask = 0x0004_0000 + WRITE_OWNER AccessMask = 0x0008_0000 + SYNCHRONIZE AccessMask = 0x0010_0000 + + STANDARD_RIGHTS_REQUIRED AccessMask = 0x000F_0000 + + STANDARD_RIGHTS_READ AccessMask = READ_CONTROL + STANDARD_RIGHTS_WRITE AccessMask = READ_CONTROL + STANDARD_RIGHTS_EXECUTE AccessMask = READ_CONTROL + + STANDARD_RIGHTS_ALL AccessMask = 0x001F_0000 +) + +type FileShareMode uint32 + +//nolint:revive // SNAKE_CASE is not idiomatic in Go, but aligned with Win32 API. +const ( + FILE_SHARE_NONE FileShareMode = 0x00 + FILE_SHARE_READ FileShareMode = 0x01 + FILE_SHARE_WRITE FileShareMode = 0x02 + FILE_SHARE_DELETE FileShareMode = 0x04 + FILE_SHARE_VALID_FLAGS FileShareMode = 0x07 +) + +type FileCreationDisposition uint32 + +//nolint:revive // SNAKE_CASE is not idiomatic in Go, but aligned with Win32 API. +const ( + // from winbase.h + + CREATE_NEW FileCreationDisposition = 0x01 + CREATE_ALWAYS FileCreationDisposition = 0x02 + OPEN_EXISTING FileCreationDisposition = 0x03 + OPEN_ALWAYS FileCreationDisposition = 0x04 + TRUNCATE_EXISTING FileCreationDisposition = 0x05 +) + +// CreateFile and co. take flags or attributes together as one parameter. +// Define alias until we can use generics to allow both + +// https://learn.microsoft.com/en-us/windows/win32/fileio/file-attribute-constants +type FileFlagOrAttribute uint32 + +//nolint:revive // SNAKE_CASE is not idiomatic in Go, but aligned with Win32 API. 
+const ( // from winnt.h + FILE_FLAG_WRITE_THROUGH FileFlagOrAttribute = 0x8000_0000 + FILE_FLAG_OVERLAPPED FileFlagOrAttribute = 0x4000_0000 + FILE_FLAG_NO_BUFFERING FileFlagOrAttribute = 0x2000_0000 + FILE_FLAG_RANDOM_ACCESS FileFlagOrAttribute = 0x1000_0000 + FILE_FLAG_SEQUENTIAL_SCAN FileFlagOrAttribute = 0x0800_0000 + FILE_FLAG_DELETE_ON_CLOSE FileFlagOrAttribute = 0x0400_0000 + FILE_FLAG_BACKUP_SEMANTICS FileFlagOrAttribute = 0x0200_0000 + FILE_FLAG_POSIX_SEMANTICS FileFlagOrAttribute = 0x0100_0000 + FILE_FLAG_OPEN_REPARSE_POINT FileFlagOrAttribute = 0x0020_0000 + FILE_FLAG_OPEN_NO_RECALL FileFlagOrAttribute = 0x0010_0000 + FILE_FLAG_FIRST_PIPE_INSTANCE FileFlagOrAttribute = 0x0008_0000 +) + +type FileSQSFlag = FileFlagOrAttribute + +//nolint:revive // SNAKE_CASE is not idiomatic in Go, but aligned with Win32 API. +const ( // from winbase.h + SECURITY_ANONYMOUS FileSQSFlag = FileSQSFlag(SecurityAnonymous << 16) + SECURITY_IDENTIFICATION FileSQSFlag = FileSQSFlag(SecurityIdentification << 16) + SECURITY_IMPERSONATION FileSQSFlag = FileSQSFlag(SecurityImpersonation << 16) + SECURITY_DELEGATION FileSQSFlag = FileSQSFlag(SecurityDelegation << 16) + + SECURITY_SQOS_PRESENT FileSQSFlag = 0x00100000 + SECURITY_VALID_SQOS_FLAGS FileSQSFlag = 0x001F0000 +) + +// GetFinalPathNameByHandle flags +// +// https://learn.microsoft.com/en-us/windows/win32/api/fileapi/nf-fileapi-getfinalpathnamebyhandlew#parameters +type GetFinalPathFlag uint32 + +//nolint:revive // SNAKE_CASE is not idiomatic in Go, but aligned with Win32 API. +const ( + GetFinalPathDefaultFlag GetFinalPathFlag = 0x0 + + FILE_NAME_NORMALIZED GetFinalPathFlag = 0x0 + FILE_NAME_OPENED GetFinalPathFlag = 0x8 + + VOLUME_NAME_DOS GetFinalPathFlag = 0x0 + VOLUME_NAME_GUID GetFinalPathFlag = 0x1 + VOLUME_NAME_NT GetFinalPathFlag = 0x2 + VOLUME_NAME_NONE GetFinalPathFlag = 0x4 +) + +// getFinalPathNameByHandle facilitates calling the Windows API GetFinalPathNameByHandle +// with the given handle and flags. It transparently takes care of creating a buffer of the +// correct size for the call. +// +// https://learn.microsoft.com/en-us/windows/win32/api/fileapi/nf-fileapi-getfinalpathnamebyhandlew +func GetFinalPathNameByHandle(h windows.Handle, flags GetFinalPathFlag) (string, error) { + b := stringbuffer.NewWString() + //TODO: can loop infinitely if Win32 keeps returning the same (or a larger) n? + for { + n, err := windows.GetFinalPathNameByHandle(h, b.Pointer(), b.Cap(), uint32(flags)) + if err != nil { + return "", err + } + // If the buffer wasn't large enough, n will be the total size needed (including null terminator). + // Resize and try again. + if n > b.Cap() { + b.ResizeTo(n) + continue + } + // If the buffer is large enough, n will be the size not including the null terminator. + // Convert to a Go string and return. 
+ return b.String(), nil + } +} diff --git a/vendor/github.com/Microsoft/go-winio/internal/fs/security.go b/vendor/github.com/Microsoft/go-winio/internal/fs/security.go new file mode 100644 index 000000000..81760ac67 --- /dev/null +++ b/vendor/github.com/Microsoft/go-winio/internal/fs/security.go @@ -0,0 +1,12 @@ +package fs + +// https://learn.microsoft.com/en-us/windows/win32/api/winnt/ne-winnt-security_impersonation_level +type SecurityImpersonationLevel int32 // C default enums underlying type is `int`, which is Go `int32` + +// Impersonation levels +const ( + SecurityAnonymous SecurityImpersonationLevel = 0 + SecurityIdentification SecurityImpersonationLevel = 1 + SecurityImpersonation SecurityImpersonationLevel = 2 + SecurityDelegation SecurityImpersonationLevel = 3 +) diff --git a/vendor/github.com/Microsoft/go-winio/internal/fs/zsyscall_windows.go b/vendor/github.com/Microsoft/go-winio/internal/fs/zsyscall_windows.go new file mode 100644 index 000000000..e2f7bb24e --- /dev/null +++ b/vendor/github.com/Microsoft/go-winio/internal/fs/zsyscall_windows.go @@ -0,0 +1,64 @@ +//go:build windows + +// Code generated by 'go generate' using "github.com/Microsoft/go-winio/tools/mkwinsyscall"; DO NOT EDIT. + +package fs + +import ( + "syscall" + "unsafe" + + "golang.org/x/sys/windows" +) + +var _ unsafe.Pointer + +// Do the interface allocations only once for common +// Errno values. +const ( + errnoERROR_IO_PENDING = 997 +) + +var ( + errERROR_IO_PENDING error = syscall.Errno(errnoERROR_IO_PENDING) + errERROR_EINVAL error = syscall.EINVAL +) + +// errnoErr returns common boxed Errno values, to prevent +// allocations at runtime. +func errnoErr(e syscall.Errno) error { + switch e { + case 0: + return errERROR_EINVAL + case errnoERROR_IO_PENDING: + return errERROR_IO_PENDING + } + // TODO: add more here, after collecting data on the common + // error values see on Windows. (perhaps when running + // all.bat?) 
+ return e +} + +var ( + modkernel32 = windows.NewLazySystemDLL("kernel32.dll") + + procCreateFileW = modkernel32.NewProc("CreateFileW") +) + +func CreateFile(name string, access AccessMask, mode FileShareMode, sa *syscall.SecurityAttributes, createmode FileCreationDisposition, attrs FileFlagOrAttribute, templatefile windows.Handle) (handle windows.Handle, err error) { + var _p0 *uint16 + _p0, err = syscall.UTF16PtrFromString(name) + if err != nil { + return + } + return _CreateFile(_p0, access, mode, sa, createmode, attrs, templatefile) +} + +func _CreateFile(name *uint16, access AccessMask, mode FileShareMode, sa *syscall.SecurityAttributes, createmode FileCreationDisposition, attrs FileFlagOrAttribute, templatefile windows.Handle) (handle windows.Handle, err error) { + r0, _, e1 := syscall.Syscall9(procCreateFileW.Addr(), 7, uintptr(unsafe.Pointer(name)), uintptr(access), uintptr(mode), uintptr(unsafe.Pointer(sa)), uintptr(createmode), uintptr(attrs), uintptr(templatefile), 0, 0) + handle = windows.Handle(r0) + if handle == windows.InvalidHandle { + err = errnoErr(e1) + } + return +} diff --git a/vendor/github.com/Microsoft/go-winio/internal/socket/socket.go b/vendor/github.com/Microsoft/go-winio/internal/socket/socket.go index 39e8c05f8..aeb7b7250 100644 --- a/vendor/github.com/Microsoft/go-winio/internal/socket/socket.go +++ b/vendor/github.com/Microsoft/go-winio/internal/socket/socket.go @@ -100,8 +100,8 @@ func (f *runtimeFunc) Load() error { (*byte)(unsafe.Pointer(&f.addr)), uint32(unsafe.Sizeof(f.addr)), &n, - nil, //overlapped - 0, //completionRoutine + nil, // overlapped + 0, // completionRoutine ) }) return f.err diff --git a/vendor/github.com/Microsoft/go-winio/internal/stringbuffer/wstring.go b/vendor/github.com/Microsoft/go-winio/internal/stringbuffer/wstring.go new file mode 100644 index 000000000..7ad505702 --- /dev/null +++ b/vendor/github.com/Microsoft/go-winio/internal/stringbuffer/wstring.go @@ -0,0 +1,132 @@ +package stringbuffer + +import ( + "sync" + "unicode/utf16" +) + +// TODO: worth exporting and using in mkwinsyscall? + +// MinWStringCap is the buffer size in the pool, chosen somewhat arbitrarily to accommodate +// large path strings: +// MAX_PATH (260) + size of volume GUID prefix (49) + null terminator = 310. +const MinWStringCap = 310 + +// use *[]uint16 since []uint16 creates an extra allocation where the slice header +// is copied to heap and then referenced via pointer in the interface header that sync.Pool +// stores. +var pathPool = sync.Pool{ // if go1.18+ adds Pool[T], use that to store []uint16 directly + New: func() interface{} { + b := make([]uint16, MinWStringCap) + return &b + }, +} + +func newBuffer() []uint16 { return *(pathPool.Get().(*[]uint16)) } + +// freeBuffer copies the slice header data, and puts a pointer to that in the pool. +// This avoids taking a pointer to the slice header in WString, which can be set to nil. +func freeBuffer(b []uint16) { pathPool.Put(&b) } + +// WString is a wide string buffer ([]uint16) meant for storing UTF-16 encoded strings +// for interacting with Win32 APIs. +// Sizes are specified as uint32 and not int. +// +// It is not thread safe. +type WString struct { + // type-def allows casting to []uint16 directly, use struct to prevent that and allow adding fields in the future. + + // raw buffer + b []uint16 +} + +// NewWString returns a [WString] allocated from a shared pool with an +// initial capacity of at least [MinWStringCap].
+// Since the buffer may have been previously used, its contents are not guaranteed to be empty. +// +// The buffer should be freed via [WString.Free]. +func NewWString() *WString { + return &WString{ + b: newBuffer(), + } +} + +func (b *WString) Free() { + if b.empty() { + return + } + freeBuffer(b.b) + b.b = nil +} + +// ResizeTo grows the buffer to at least c and returns the new capacity, freeing the +// previous buffer back into the pool. +func (b *WString) ResizeTo(c uint32) uint32 { + // already sufficient (or c is 0) + if c <= b.Cap() { + return b.Cap() + } + + if c <= MinWStringCap { + c = MinWStringCap + } + // allocate at least double the buffer size, as is done in [bytes.Buffer] and other places + if c <= 2*b.Cap() { + c = 2 * b.Cap() + } + + b2 := make([]uint16, c) + if !b.empty() { + copy(b2, b.b) + freeBuffer(b.b) + } + b.b = b2 + return c +} + +// Buffer returns the underlying []uint16 buffer. +func (b *WString) Buffer() []uint16 { + if b.empty() { + return nil + } + return b.b +} + +// Pointer returns a pointer to the first uint16 in the buffer. +// If [WString.Free] has already been called, the pointer will be nil. +func (b *WString) Pointer() *uint16 { + if b.empty() { + return nil + } + return &b.b[0] +} + +// String returns the UTF-8 encoding of the UTF-16 string in the buffer. +// +// It assumes that the data is null-terminated. +func (b *WString) String() string { + // Using [windows.UTF16ToString] would require importing "golang.org/x/sys/windows" + // and would make this code Windows-only, which makes no sense. + // So copy UTF16ToString code into here. + // If other windows-specific code is added, switch to [windows.UTF16ToString]. + + s := b.b + for i, v := range s { + if v == 0 { + s = s[:i] + break + } + } + return string(utf16.Decode(s)) +} + +// Cap returns the underlying buffer capacity.
+func (b *WString) Cap() uint32 { + if b.empty() { + return 0 + } + return b.cap() +} + +func (b *WString) cap() uint32 { return uint32(cap(b.b)) } +func (b *WString) empty() bool { return b == nil || b.cap() == 0 } diff --git a/vendor/github.com/Microsoft/go-winio/pipe.go b/vendor/github.com/Microsoft/go-winio/pipe.go index ca6e38fc0..25cc81103 100644 --- a/vendor/github.com/Microsoft/go-winio/pipe.go +++ b/vendor/github.com/Microsoft/go-winio/pipe.go @@ -16,11 +16,12 @@ import ( "unsafe" "golang.org/x/sys/windows" + + "github.com/Microsoft/go-winio/internal/fs" ) //sys connectNamedPipe(pipe syscall.Handle, o *syscall.Overlapped) (err error) = ConnectNamedPipe //sys createNamedPipe(name string, flags uint32, pipeMode uint32, maxInstances uint32, outSize uint32, inSize uint32, defaultTimeout uint32, sa *syscall.SecurityAttributes) (handle syscall.Handle, err error) [failretval==syscall.InvalidHandle] = CreateNamedPipeW -//sys createFile(name string, access uint32, mode uint32, sa *syscall.SecurityAttributes, createmode uint32, attrs uint32, templatefile syscall.Handle) (handle syscall.Handle, err error) [failretval==syscall.InvalidHandle] = CreateFileW //sys getNamedPipeInfo(pipe syscall.Handle, flags *uint32, outSize *uint32, inSize *uint32, maxInstances *uint32) (err error) = GetNamedPipeInfo //sys getNamedPipeHandleState(pipe syscall.Handle, state *uint32, curInstances *uint32, maxCollectionCount *uint32, collectDataTimeout *uint32, userName *uint16, maxUserNameSize uint32) (err error) = GetNamedPipeHandleStateW //sys localAlloc(uFlags uint32, length uint32) (ptr uintptr) = LocalAlloc @@ -163,19 +164,21 @@ func (s pipeAddress) String() string { } // tryDialPipe attempts to dial the pipe at `path` until `ctx` cancellation or timeout. -func tryDialPipe(ctx context.Context, path *string, access uint32) (syscall.Handle, error) { +func tryDialPipe(ctx context.Context, path *string, access fs.AccessMask) (syscall.Handle, error) { for { select { case <-ctx.Done(): return syscall.Handle(0), ctx.Err() default: - h, err := createFile(*path, + wh, err := fs.CreateFile(*path, access, - 0, - nil, - syscall.OPEN_EXISTING, - windows.FILE_FLAG_OVERLAPPED|windows.SECURITY_SQOS_PRESENT|windows.SECURITY_ANONYMOUS, - 0) + 0, // mode + nil, // security attributes + fs.OPEN_EXISTING, + fs.FILE_FLAG_OVERLAPPED|fs.SECURITY_SQOS_PRESENT|fs.SECURITY_ANONYMOUS, + 0, // template file handle + ) + h := syscall.Handle(wh) if err == nil { return h, nil } @@ -219,7 +222,7 @@ func DialPipeContext(ctx context.Context, path string) (net.Conn, error) { func DialPipeAccess(ctx context.Context, path string, access uint32) (net.Conn, error) { var err error var h syscall.Handle - h, err = tryDialPipe(ctx, &path, access) + h, err = tryDialPipe(ctx, &path, fs.AccessMask(access)) if err != nil { return nil, err } @@ -279,6 +282,7 @@ func makeServerPipeHandle(path string, sd []byte, c *PipeConfig, first bool) (sy } defer localFree(ntPath.Buffer) oa.ObjectName = &ntPath + oa.Attributes = windows.OBJ_CASE_INSENSITIVE // The security descriptor is only needed for the first pipe. 
if first { diff --git a/vendor/github.com/Microsoft/go-winio/tools/mkwinsyscall/mkwinsyscall.go b/vendor/github.com/Microsoft/go-winio/tools/mkwinsyscall/mkwinsyscall.go index e72be3138..20d9e3d27 100644 --- a/vendor/github.com/Microsoft/go-winio/tools/mkwinsyscall/mkwinsyscall.go +++ b/vendor/github.com/Microsoft/go-winio/tools/mkwinsyscall/mkwinsyscall.go @@ -477,15 +477,14 @@ func newFn(s string) (*Fn, error) { return nil, errors.New("Could not extract dll name from \"" + f.src + "\"") } s = trim(s[1:]) - a := strings.Split(s, ".") - switch len(a) { - case 1: - f.dllfuncname = a[0] - case 2: - f.dllname = a[0] - f.dllfuncname = a[1] - default: - return nil, errors.New("Could not extract dll name from \"" + f.src + "\"") + if i := strings.LastIndex(s, "."); i >= 0 { + f.dllname = s[:i] + f.dllfuncname = s[i+1:] + } else { + f.dllfuncname = s + } + if f.dllfuncname == "" { + return nil, fmt.Errorf("function name is not specified in %q", s) } if n := f.dllfuncname; endsIn(n, '?') { f.dllfuncname = n[:len(n)-1] @@ -502,7 +501,23 @@ func (f *Fn) DLLName() string { return f.dllname } -// DLLName returns DLL function name for function f. +// DLLVar returns a valid Go identifier that represents DLLName. +func (f *Fn) DLLVar() string { + id := strings.Map(func(r rune) rune { + switch r { + case '.', '-': + return '_' + default: + return r + } + }, f.DLLName()) + if !token.IsIdentifier(id) { + panic(fmt.Errorf("could not create Go identifier for DLLName %q", f.DLLName())) + } + return id +} + +// DLLFuncName returns DLL function name for function f. func (f *Fn) DLLFuncName() string { if f.dllfuncname == "" { return f.Name @@ -648,6 +663,13 @@ func (f *Fn) HelperName() string { return "_" + f.Name } +// DLL is a DLL's filename and a string that is valid in a Go identifier that should be used when +// naming a variable that refers to the DLL. +type DLL struct { + Name string + Var string +} + // Source files and functions. type Source struct { Funcs []*Fn @@ -697,18 +719,20 @@ func ParseFiles(fs []string) (*Source, error) { } // DLLs return dll names for a source set src. 
-func (src *Source) DLLs() []string { +func (src *Source) DLLs() []DLL { uniq := make(map[string]bool) - r := make([]string, 0) + r := make([]DLL, 0) for _, f := range src.Funcs { - name := f.DLLName() - if _, found := uniq[name]; !found { - uniq[name] = true - r = append(r, name) + id := f.DLLVar() + if _, found := uniq[id]; !found { + uniq[id] = true + r = append(r, DLL{f.DLLName(), id}) } } if *sortdecls { - sort.Strings(r) + sort.Slice(r, func(i, j int) bool { + return r[i].Var < r[j].Var + }) } return r } @@ -878,6 +902,22 @@ func (src *Source) Generate(w io.Writer) error { return nil } +func writeTempSourceFile(data []byte) (string, error) { + f, err := os.CreateTemp("", "mkwinsyscall-generated-*.go") + if err != nil { + return "", err + } + _, err = f.Write(data) + if closeErr := f.Close(); err == nil { + err = closeErr + } + if err != nil { + os.Remove(f.Name()) // best effort + return "", err + } + return f.Name(), nil +} + func usage() { fmt.Fprintf(os.Stderr, "usage: mkwinsyscall [flags] [path ...]\n") flag.PrintDefaults() @@ -904,7 +944,12 @@ func main() { data, err := format.Source(buf.Bytes()) if err != nil { - log.Fatal(err) + log.Printf("failed to format source: %v", err) + f, err := writeTempSourceFile(buf.Bytes()) + if err != nil { + log.Fatalf("failed to write unformatted source to file: %v", err) + } + log.Fatalf("for diagnosis, wrote unformatted source to %v", f) } if *filename == "" { _, err = os.Stdout.Write(data) @@ -970,10 +1015,10 @@ var ( {{/* help functions */}} -{{define "dlls"}}{{range .DLLs}} mod{{.}} = {{newlazydll .}} +{{define "dlls"}}{{range .DLLs}} mod{{.Var}} = {{newlazydll .Name}} {{end}}{{end}} -{{define "funcnames"}}{{range .DLLFuncNames}} proc{{.DLLFuncName}} = mod{{.DLLName}}.NewProc("{{.DLLFuncName}}") +{{define "funcnames"}}{{range .DLLFuncNames}} proc{{.DLLFuncName}} = mod{{.DLLVar}}.NewProc("{{.DLLFuncName}}") {{end}}{{end}} {{define "helperbody"}} diff --git a/vendor/github.com/Microsoft/go-winio/zsyscall_windows.go b/vendor/github.com/Microsoft/go-winio/zsyscall_windows.go index 83f45a135..469b16f63 100644 --- a/vendor/github.com/Microsoft/go-winio/zsyscall_windows.go +++ b/vendor/github.com/Microsoft/go-winio/zsyscall_windows.go @@ -63,7 +63,6 @@ var ( procBackupWrite = modkernel32.NewProc("BackupWrite") procCancelIoEx = modkernel32.NewProc("CancelIoEx") procConnectNamedPipe = modkernel32.NewProc("ConnectNamedPipe") - procCreateFileW = modkernel32.NewProc("CreateFileW") procCreateIoCompletionPort = modkernel32.NewProc("CreateIoCompletionPort") procCreateNamedPipeW = modkernel32.NewProc("CreateNamedPipeW") procGetCurrentThread = modkernel32.NewProc("GetCurrentThread") @@ -305,24 +304,6 @@ func connectNamedPipe(pipe syscall.Handle, o *syscall.Overlapped) (err error) { return } -func createFile(name string, access uint32, mode uint32, sa *syscall.SecurityAttributes, createmode uint32, attrs uint32, templatefile syscall.Handle) (handle syscall.Handle, err error) { - var _p0 *uint16 - _p0, err = syscall.UTF16PtrFromString(name) - if err != nil { - return - } - return _createFile(_p0, access, mode, sa, createmode, attrs, templatefile) -} - -func _createFile(name *uint16, access uint32, mode uint32, sa *syscall.SecurityAttributes, createmode uint32, attrs uint32, templatefile syscall.Handle) (handle syscall.Handle, err error) { - r0, _, e1 := syscall.Syscall9(procCreateFileW.Addr(), 7, uintptr(unsafe.Pointer(name)), uintptr(access), uintptr(mode), uintptr(unsafe.Pointer(sa)), uintptr(createmode), uintptr(attrs), uintptr(templatefile), 0, 0) - 
handle = syscall.Handle(r0) - if handle == syscall.InvalidHandle { - err = errnoErr(e1) - } - return -} - func createIoCompletionPort(file syscall.Handle, port syscall.Handle, key uintptr, threadCount uint32) (newport syscall.Handle, err error) { r0, _, e1 := syscall.Syscall6(procCreateIoCompletionPort.Addr(), 4, uintptr(file), uintptr(port), uintptr(key), uintptr(threadCount), 0, 0) newport = syscall.Handle(r0) diff --git a/vendor/github.com/containers/storage/.cirrus.yml b/vendor/github.com/containers/storage/.cirrus.yml index 4e3ba7317..8ef38e2cd 100644 --- a/vendor/github.com/containers/storage/.cirrus.yml +++ b/vendor/github.com/containers/storage/.cirrus.yml @@ -17,13 +17,13 @@ env: #### #### Cache-image names to test with (double-quotes around names are critical) ### - FEDORA_NAME: "fedora-37" - DEBIAN_NAME: "debian-12" + FEDORA_NAME: "fedora-38" + DEBIAN_NAME: "debian-13" # GCE project where images live IMAGE_PROJECT: "libpod-218412" # VM Image built in containers/automation_images - IMAGE_SUFFIX: "c20230405t152256z-f37f36d12" + IMAGE_SUFFIX: "c20230614t132754z-f38f37d13" FEDORA_CACHE_IMAGE_NAME: "fedora-${IMAGE_SUFFIX}" DEBIAN_CACHE_IMAGE_NAME: "debian-${IMAGE_SUFFIX}" @@ -56,7 +56,6 @@ gce_instance: linux_testing: &linux_testing depends_on: - lint - only_if: $CIRRUS_CHANGE_TITLE !=~ '.*CI:DOCS.*' gce_instance: # Only need to specify differences from defaults (above) image_name: "${VM_IMAGE}" @@ -127,10 +126,12 @@ lint_task: fingerprint_script: cat go.sum folder: $GOPATH/pkg/mod build_script: | - echo "deb http://deb.debian.org/debian stretch-backports main" > /etc/apt/sources.list.d/backports.list apt-get update apt-get install -y libbtrfs-dev libdevmapper-dev - test_script: make TAGS=regex_precompile local-validate && make lint && make clean + test_script: | + make TAGS=regex_precompile local-validate + make lint + make clean # Update metadata on VM images referenced by this repository state @@ -168,7 +169,7 @@ vendor_task: cross_task: container: - image: golang:1.17 + image: golang:1.19 build_script: make cross @@ -182,6 +183,6 @@ success_task: - vendor - cross container: - image: golang:1.17 + image: golang:1.19 clone_script: 'mkdir -p "$CIRRUS_WORKING_DIR"' # Source code not needed script: /bin/true diff --git a/vendor/github.com/containers/storage/.golangci.yml b/vendor/github.com/containers/storage/.golangci.yml index 755aa35c0..20968466c 100644 --- a/vendor/github.com/containers/storage/.golangci.yml +++ b/vendor/github.com/containers/storage/.golangci.yml @@ -4,68 +4,8 @@ run: deadline: 5m skip-dirs-use-default: true linters: - enable-all: true + enable: + - gofumpt disable: - - cyclop - - deadcode - - dogsled - - dupl - errcheck - - errname - - errorlint - - exhaustive - - exhaustivestruct - - exhaustruct - - forbidigo - - forcetypeassert - - funlen - - gci - - gochecknoglobals - - gochecknoinits - - gocognit - - gocritic - - gocyclo - - godot - - godox - - goerr113 - - gofumpt - - golint - - gomnd - - gosec - - gosimple - - govet - - ifshort - - ineffassign - - interfacer - - interfacebloat - - ireturn - - lll - - maintidx - - maligned - - misspell - - musttag - - nakedret - - nestif - - nlreturn - - nolintlint - - nonamedreturns - - nosnakecase - - paralleltest - - prealloc - - predeclared - - rowserrcheck - - scopelint - staticcheck - - structcheck - - stylecheck - - tagliatelle - - testpackage - - thelper - - unconvert - - unparam - - varcheck - - varnamelen - - wastedassign - - whitespace - - wrapcheck - - wsl diff --git 
a/vendor/github.com/containers/storage/Makefile b/vendor/github.com/containers/storage/Makefile index 52266da0f..6cb354c2c 100644 --- a/vendor/github.com/containers/storage/Makefile +++ b/vendor/github.com/containers/storage/Makefile @@ -1,13 +1,18 @@ -export GO111MODULE=off -export GOPROXY=https://proxy.golang.org - .PHONY: \ all \ + binary \ clean \ + codespell \ + containers-storage \ + cross \ default \ docs \ + gccgo \ help \ + install \ + install.docs \ install.tools \ + lint \ local-binary \ local-cross \ local-gccgo \ @@ -15,33 +20,25 @@ export GOPROXY=https://proxy.golang.org local-test-integration \ local-test-unit \ local-validate \ - lint \ - vendor + test-integration \ + test-unit \ + validate \ + vendor \ + vendor-in-container -PACKAGE := github.com/containers/storage -GIT_BRANCH := $(shell git rev-parse --abbrev-ref HEAD 2>/dev/null) -GIT_BRANCH_CLEAN := $(shell echo $(GIT_BRANCH) | sed -e "s/[^[:alnum:]]/-/g") -EPOCH_TEST_COMMIT := 0418ebf59f9e1f564831c0ba9378b7f8e40a1c73 NATIVETAGS := AUTOTAGS := $(shell ./hack/btrfs_tag.sh) $(shell ./hack/libdm_tag.sh) $(shell ./hack/libsubid_tag.sh) BUILDFLAGS := -tags "$(AUTOTAGS) $(TAGS)" $(FLAGS) GO ?= go TESTFLAGS := $(shell $(GO) test -race $(BUILDFLAGS) ./pkg/stringutils 2>&1 > /dev/null && echo -race) -# Go module support: set `-mod=vendor` to use the vendored sources -ifeq ($(shell $(GO) help mod >/dev/null 2>&1 && echo true), true) - GO:=GO111MODULE=on $(GO) - MOD_VENDOR=-mod=vendor -endif - default all: local-binary docs local-validate local-cross ## validate all checks, build and cross-build\nbinaries and docs clean: ## remove all built files $(RM) -f containers-storage containers-storage.* docs/*.1 docs/*.5 -sources := $(wildcard *.go cmd/containers-storage/*.go drivers/*.go drivers/*/*.go internal/*/*.go pkg/*/*.go pkg/*/*/*.go types/*.go) -containers-storage: $(sources) ## build using gc on the host - $(GO) build $(MOD_VENDOR) -compiler gc $(BUILDFLAGS) ./cmd/containers-storage +containers-storage: ## build using gc on the host + $(GO) build -compiler gc $(BUILDFLAGS) ./cmd/containers-storage codespell: codespell -S Makefile,build,buildah,buildah.spec,imgtype,copy,AUTHORS,bin,vendor,.git,go.sum,CHANGELOG.md,changelog.txt,seccomp.json,.cirrus.yml,"*.xz,*.gz,*.tar,*.tgz,*ico,*.png,*.1,*.5,*.orig,*.rej" -L worl,flate,uint,iff,od,ERRO -w @@ -49,15 +46,15 @@ codespell: binary local-binary: containers-storage local-gccgo gccgo: ## build using gccgo on the host - GCCGO=$(PWD)/hack/gccgo-wrapper.sh $(GO) build $(MOD_VENDOR) -compiler gccgo $(BUILDFLAGS) -o containers-storage.gccgo ./cmd/containers-storage + GCCGO=$(PWD)/hack/gccgo-wrapper.sh $(GO) build -compiler gccgo $(BUILDFLAGS) -o containers-storage.gccgo ./cmd/containers-storage local-cross cross: ## cross build the binaries for arm, darwin, and freebsd @for target in linux/amd64 linux/386 linux/arm linux/arm64 linux/ppc64 linux/ppc64le linux/s390x linux/mips linux/mipsle linux/mips64 linux/mips64le darwin/amd64 windows/amd64 freebsd/amd64 freebsd/arm64 ; do \ os=`echo $${target} | cut -f1 -d/` ; \ arch=`echo $${target} | cut -f2 -d/` ; \ suffix=$${os}.$${arch} ; \ - echo env CGO_ENABLED=0 GOOS=$${os} GOARCH=$${arch} $(GO) build $(MOD_VENDOR) -compiler gc -tags \"$(NATIVETAGS) $(TAGS)\" $(FLAGS) -o containers-storage.$${suffix} ./cmd/containers-storage ; \ - env CGO_ENABLED=0 GOOS=$${os} GOARCH=$${arch} $(GO) build $(MOD_VENDOR) -compiler gc -tags "$(NATIVETAGS) $(TAGS)" $(FLAGS) -o containers-storage.$${suffix} ./cmd/containers-storage || exit 1 ; \ + echo env CGO_ENABLED=0 
GOOS=$${os} GOARCH=$${arch} $(GO) build -compiler gc -tags \"$(NATIVETAGS) $(TAGS)\" $(FLAGS) -o containers-storage.$${suffix} ./cmd/containers-storage ; \ + env CGO_ENABLED=0 GOOS=$${os} GOARCH=$${arch} $(GO) build -compiler gc -tags "$(NATIVETAGS) $(TAGS)" $(FLAGS) -o containers-storage.$${suffix} ./cmd/containers-storage || exit 1 ; \ done docs: install.tools ## build the docs on the host @@ -66,21 +63,17 @@ docs: install.tools ## build the docs on the host local-test: local-binary local-test-unit local-test-integration ## build the binaries and run the tests local-test-unit test-unit: local-binary ## run the unit tests on the host (requires\nsuperuser privileges) - @$(GO) test $(MOD_VENDOR) $(BUILDFLAGS) $(TESTFLAGS) $(shell $(GO) list ./... | grep -v ^$(PACKAGE)/vendor) + @$(GO) test -count 1 $(BUILDFLAGS) $(TESTFLAGS) ./... local-test-integration test-integration: local-binary ## run the integration tests on the host (requires\nsuperuser privileges) @cd tests; ./test_runner.bash -local-validate validate: install.tools ## validate DCO and gofmt on the host +local-validate validate: install.tools ## validate DCO on the host @./hack/git-validation.sh - @./hack/gofmt.sh install.tools: $(MAKE) -C tests/tools -$(FFJSON): - $(MAKE) -C tests/tools - install.docs: docs $(MAKE) -C docs install diff --git a/vendor/github.com/containers/storage/VERSION b/vendor/github.com/containers/storage/VERSION index aa3ed3a5e..9db5ea12f 100644 --- a/vendor/github.com/containers/storage/VERSION +++ b/vendor/github.com/containers/storage/VERSION @@ -1 +1 @@ -1.46.1 +1.48.0 diff --git a/vendor/github.com/containers/storage/check.go b/vendor/github.com/containers/storage/check.go new file mode 100644 index 000000000..e58084fc7 --- /dev/null +++ b/vendor/github.com/containers/storage/check.go @@ -0,0 +1,1153 @@ +package storage + +import ( + "archive/tar" + "errors" + "fmt" + "io" + "os" + "path" + "path/filepath" + "sort" + "strings" + "sync" + "time" + + drivers "github.com/containers/storage/drivers" + "github.com/containers/storage/pkg/archive" + "github.com/containers/storage/pkg/idtools" + "github.com/containers/storage/pkg/ioutils" + "github.com/containers/storage/types" + "github.com/sirupsen/logrus" +) + +var ( + // ErrLayerUnaccounted describes a layer that is present in the lower-level storage driver, + // but which is not known to or managed by the higher-level driver-agnostic logic. + ErrLayerUnaccounted = types.ErrLayerUnaccounted + // ErrLayerUnreferenced describes a layer which is not used by any image or container. + ErrLayerUnreferenced = types.ErrLayerUnreferenced + // ErrLayerIncorrectContentDigest describes a layer for which the contents of one or more + // files which were added in the layer appear to have changed. It may instead look like an + // unnamed "file integrity checksum failed" error. + ErrLayerIncorrectContentDigest = types.ErrLayerIncorrectContentDigest + // ErrLayerIncorrectContentSize describes a layer for which regenerating the diff that was + // used to populate the layer produced a diff of a different size. We check the digest + // first, so it's highly unlikely you'll ever see this error. + ErrLayerIncorrectContentSize = types.ErrLayerIncorrectContentSize + // ErrLayerContentModified describes a layer which contains contents which should not be + // there, or for which ownership/permissions/dates have been changed. + ErrLayerContentModified = types.ErrLayerContentModified + // ErrLayerDataMissing describes a layer which is missing a big data item. 
+ ErrLayerDataMissing = types.ErrLayerDataMissing + // ErrLayerMissing describes a layer which is the missing parent of a layer. + ErrLayerMissing = types.ErrLayerMissing + // ErrImageLayerMissing describes an image which claims to have a layer that we don't know + // about. + ErrImageLayerMissing = types.ErrImageLayerMissing + // ErrImageDataMissing describes an image which is missing a big data item. + ErrImageDataMissing = types.ErrImageDataMissing + // ErrImageDataIncorrectSize describes an image which has a big data item which looks like + // its size has changed, likely because it's been modified somehow. + ErrImageDataIncorrectSize = types.ErrImageDataIncorrectSize + // ErrContainerImageMissing describes a container which claims to be based on an image that + // we don't know about. + ErrContainerImageMissing = types.ErrContainerImageMissing + // ErrContainerDataMissing describes a container which is missing a big data item. + ErrContainerDataMissing = types.ErrContainerDataMissing + // ErrContainerDataIncorrectSize describes a container which has a big data item which looks + // like its size has changed, likely because it's been modified somehow. + ErrContainerDataIncorrectSize = types.ErrContainerDataIncorrectSize +) + +const ( + defaultMaximumUnreferencedLayerAge = 24 * time.Hour +) + +// CheckOptions is the set of options for Check(), specifying which tests to perform. +type CheckOptions struct { + LayerUnreferencedMaximumAge *time.Duration // maximum allowed age of unreferenced layers + LayerDigests bool // check that contents of image layer diffs can still be reconstructed + LayerMountable bool // check that layers are mountable + LayerContents bool // check that contents of image layers match their diffs, with no unexpected changes, requires LayerMountable + LayerData bool // check that associated "big" data items are present and can be read + ImageData bool // check that associated "big" data items are present, can be read, and match the recorded size + ContainerData bool // check that associated "big" data items are present and can be read +} + +// checkIgnore is used to tell functions that compare the contents of a mounted +// layer to the contents that we'd expect it to have to ignore certain +// discrepancies +type checkIgnore struct { + ownership, timestamps, permissions bool +} + +// CheckMost returns a CheckOptions with mostly just "quick" checks enabled. +func CheckMost() *CheckOptions { + return &CheckOptions{ + LayerDigests: true, + LayerMountable: true, + LayerContents: false, + LayerData: true, + ImageData: true, + ContainerData: true, + } +} + +// CheckEverything returns a CheckOptions with every check enabled. +func CheckEverything() *CheckOptions { + return &CheckOptions{ + LayerDigests: true, + LayerMountable: true, + LayerContents: true, + LayerData: true, + ImageData: true, + ContainerData: true, + } +} + +// CheckReport is a list of detected problems. +type CheckReport struct { + Layers map[string][]error // damaged read-write layers + ROLayers map[string][]error // damaged read-only layers + layerParentsByLayerID map[string]string + layerOrder map[string]int + Images map[string][]error // damaged read-write images (including those with damaged layers) + ROImages map[string][]error // damaged read-only images (including those with damaged layers) + Containers map[string][]error // damaged containers (including those based on damaged images) +} + +// RepairOptions is the set of options for Repair(). 
+type RepairOptions struct { + RemoveContainers bool // Remove damaged containers +} + +// RepairEverything returns a RepairOptions with every optional remediation +// enabled. +func RepairEverything() *RepairOptions { + return &RepairOptions{ + RemoveContainers: true, + } +} + +// Check returns a list of problems with what's in the store, as a whole. It can be very expensive +// to call. +func (s *store) Check(options *CheckOptions) (CheckReport, error) { + var ignore checkIgnore + for _, o := range s.graphOptions { + if strings.Contains(o, "ignore_chown_errors=true") { + ignore.ownership = true + } + if strings.HasPrefix(o, "force_mask=") { + ignore.permissions = true + } + } + for o := range s.pullOptions { + if strings.Contains(o, "use_hard_links") { + if s.pullOptions[o] == "true" { + ignore.timestamps = true + } + } + } + + if options == nil { + options = CheckMost() + } + + report := CheckReport{ + Layers: make(map[string][]error), + ROLayers: make(map[string][]error), + layerParentsByLayerID: make(map[string]string), // layers ID -> their parent's ID, if there is one + layerOrder: make(map[string]int), // layers ID -> order for removal, if we needed to remove them all + Images: make(map[string][]error), + ROImages: make(map[string][]error), + Containers: make(map[string][]error), + } + + // This map will track known layer IDs. If we have multiple stores, read-only ones can + // contain copies of layers that are in the read-write store, but we'll only ever be + // mounting or extracting contents from the read-write versions, since we always search it + // first. The boolean will track if the layer is referenced by at least one image or + // container. + referencedLayers := make(map[string]bool) + referencedROLayers := make(map[string]bool) + + // This map caches the headers for items included in layer diffs. + diffHeadersByLayer := make(map[string][]*tar.Header) + var diffHeadersByLayerMutex sync.Mutex + + // Walk the list of layer stores, looking at each layer that we didn't see in a + // previously-visited store. + if _, _, err := readOrWriteAllLayerStores(s, func(store roLayerStore) (struct{}, bool, error) { + layers, err := store.Layers() + if err != nil { + return struct{}{}, true, err + } + isReadWrite := roLayerStoreIsReallyReadWrite(store) + readWriteDesc := "" + if !isReadWrite { + readWriteDesc = "read-only " + } + // Examine each layer in turn. + for i := range layers { + layer := layers[i] + id := layer.ID + // If we've already seen a layer with this ID, no need to process it again. + if _, checked := referencedLayers[id]; checked { + continue + } + if _, checked := referencedROLayers[id]; checked { + continue + } + // Note the parent of this layer, and add it to the map of known layers so + // that we know that we've visited it, but we haven't confirmed that it's + // used by anything. + report.layerParentsByLayerID[id] = layer.Parent + if isReadWrite { + referencedLayers[id] = false + } else { + referencedROLayers[id] = false + } + logrus.Debugf("checking %slayer %s", readWriteDesc, id) + // Check that all of the big data items are present and can be read. We + // have no digest or size information to compare the contents to (grumble), + // so we can't verify that the contents haven't been changed since they + // were stored. 
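+			// (Editor's note, not upstream text: the probe below reduces to this
+			// pattern, streaming each item and discarding the bytes, so a missing
+			// or unreadable item surfaces as a non-nil error:
+			//
+			//	rc, err := store.BigData(id, name) // open the data item
+			//	if err == nil {
+			//		_, err = io.Copy(io.Discard, rc) // read it end-to-end
+			//		rc.Close()
+			//	}
+			//
+			// only readability is verified, for the reasons given above.)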
+ if options.LayerData { + for _, name := range layer.BigDataNames { + func() { + rc, err := store.BigData(id, name) + if err != nil { + if errors.Is(err, os.ErrNotExist) { + err := fmt.Errorf("%slayer %s: data item %q: %w", readWriteDesc, id, name, ErrLayerDataMissing) + if isReadWrite { + report.Layers[id] = append(report.Layers[id], err) + } else { + report.ROLayers[id] = append(report.ROLayers[id], err) + } + return + } + err = fmt.Errorf("%slayer %s: data item %q: %w", readWriteDesc, id, name, err) + if isReadWrite { + report.Layers[id] = append(report.Layers[id], err) + } else { + report.ROLayers[id] = append(report.ROLayers[id], err) + } + return + } + defer rc.Close() + if _, err = io.Copy(io.Discard, rc); err != nil { + err = fmt.Errorf("%slayer %s: data item %q: %w", readWriteDesc, id, name, err) + if isReadWrite { + report.Layers[id] = append(report.Layers[id], err) + } else { + report.ROLayers[id] = append(report.ROLayers[id], err) + } + return + } + }() + } + } + // Check that the content we get back when extracting the layer's contents + // match the recorded digest and size. A layer for which they're not given + // isn't a part of an image, and is likely the read-write layer for a + // container, and we can't vouch for the integrity of its contents. + // For each layer with known contents, record the headers for the layer's + // diff, which we can use to reconstruct the expected contents for the tree + // we see when the layer is mounted. + if options.LayerDigests && layer.UncompressedDigest != "" { + func() { + expectedDigest := layer.UncompressedDigest + // Double-check that the digest isn't invalid somehow. + if err := layer.UncompressedDigest.Validate(); err != nil { + err := fmt.Errorf("%slayer %s: %w", readWriteDesc, id, err) + if isReadWrite { + report.Layers[id] = append(report.Layers[id], err) + } else { + report.ROLayers[id] = append(report.ROLayers[id], err) + } + return + } + // Extract the diff. + uncompressed := archive.Uncompressed + diffOptions := DiffOptions{ + Compression: &uncompressed, + } + diff, err := store.Diff("", id, &diffOptions) + if err != nil { + err := fmt.Errorf("%slayer %s: %w", readWriteDesc, id, err) + if isReadWrite { + report.Layers[id] = append(report.Layers[id], err) + } else { + report.ROLayers[id] = append(report.ROLayers[id], err) + } + return + } + // Digest and count the length of the diff. + digester := expectedDigest.Algorithm().Digester() + counter := ioutils.NewWriteCounter(digester.Hash()) + reader := io.TeeReader(diff, counter) + var wg sync.WaitGroup + var archiveErr error + wg.Add(1) + go func(layerID string, diffReader io.Reader) { + // Read the diff, one item at a time. 
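+						// (Editor's note: diffReader here is the io.TeeReader built
+						// above, so this single pass over the diff both feeds the
+						// digester/byte counter and records the tar headers that the
+						// optional content check reuses later.)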
+ tr := tar.NewReader(diffReader) + hdr, err := tr.Next() + for err == nil { + diffHeadersByLayerMutex.Lock() + diffHeadersByLayer[layerID] = append(diffHeadersByLayer[layerID], hdr) + diffHeadersByLayerMutex.Unlock() + hdr, err = tr.Next() + } + if !errors.Is(err, io.EOF) { + archiveErr = err + } + // consume any trailer after the EOF marker + io.Copy(io.Discard, diffReader) + wg.Done() + }(id, reader) + wg.Wait() + diff.Close() + if archiveErr != nil { + // Reading the diff didn't end as expected + diffHeadersByLayerMutex.Lock() + delete(diffHeadersByLayer, id) + diffHeadersByLayerMutex.Unlock() + archiveErr = fmt.Errorf("%slayer %s: %w", readWriteDesc, id, archiveErr) + if isReadWrite { + report.Layers[id] = append(report.Layers[id], archiveErr) + } else { + report.ROLayers[id] = append(report.ROLayers[id], archiveErr) + } + return + } + if digester.Digest() != layer.UncompressedDigest { + // The diff digest didn't match. + diffHeadersByLayerMutex.Lock() + delete(diffHeadersByLayer, id) + diffHeadersByLayerMutex.Unlock() + err := fmt.Errorf("%slayer %s: %w", readWriteDesc, id, ErrLayerIncorrectContentDigest) + if isReadWrite { + report.Layers[id] = append(report.Layers[id], err) + } else { + report.ROLayers[id] = append(report.ROLayers[id], err) + } + } + if layer.UncompressedSize != -1 && counter.Count != layer.UncompressedSize { + // We expected the diff to have a specific size, and + // it didn't match. + diffHeadersByLayerMutex.Lock() + delete(diffHeadersByLayer, id) + diffHeadersByLayerMutex.Unlock() + err := fmt.Errorf("%slayer %s: read %d bytes instead of %d bytes: %w", readWriteDesc, id, counter.Count, layer.UncompressedSize, ErrLayerIncorrectContentSize) + if isReadWrite { + report.Layers[id] = append(report.Layers[id], err) + } else { + report.ROLayers[id] = append(report.ROLayers[id], err) + } + } + }() + } + } + // At this point we're out of things that we can be sure will work in read-only + // stores, so skip the rest for any stores that aren't also read-write stores. + if !isReadWrite { + return struct{}{}, false, nil + } + // Content and mount checks are also things that we can only be sure will work in + // read-write stores. + for i := range layers { + layer := layers[i] + id := layer.ID + // Compare to what we see when we mount the layer and walk the tree, and + // flag cases where content is in the layer that shouldn't be there. The + // tar-split implementation of Diff() won't catch this problem by itself. + if options.LayerMountable { + func() { + // Mount the layer. + mountPoint, err := s.graphDriver.Get(id, drivers.MountOpts{MountLabel: layer.MountLabel}) + if err != nil { + err := fmt.Errorf("%slayer %s: %w", readWriteDesc, id, err) + if isReadWrite { + report.Layers[id] = append(report.Layers[id], err) + } else { + report.ROLayers[id] = append(report.ROLayers[id], err) + } + return + } + // Unmount the layer when we're done in here. + defer func() { + if err := s.graphDriver.Put(id); err != nil { + err := fmt.Errorf("%slayer %s: %w", readWriteDesc, id, err) + if isReadWrite { + report.Layers[id] = append(report.Layers[id], err) + } else { + report.ROLayers[id] = append(report.ROLayers[id], err) + } + return + } + }() + // If we're not looking at layer contents, or we didn't + // look at the diff for this layer, we're done here. + if !options.LayerDigests || layer.UncompressedDigest == "" || !options.LayerContents { + return + } + // Build a list of all of the changes in all of the layers + // that make up the tree we're looking at. 
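+					// (Editor's note: the walk below goes from this layer back to
+					// its base via layerParentsByLayerID, prepending as it goes;
+					// for a chain base <- mid <- id, diffHeaderSet ends up as
+					// [base headers, mid headers, id headers], so replaying the
+					// sets in order reproduces the expected mounted tree.)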
+ diffHeaderSet := [][]*tar.Header{} + // If we don't know _all_ of the changes that produced this + // layer, it's not part of an image, so we're done here. + for layerID := id; layerID != ""; layerID = report.layerParentsByLayerID[layerID] { + diffHeadersByLayerMutex.Lock() + layerChanges, haveChanges := diffHeadersByLayer[layerID] + diffHeadersByLayerMutex.Unlock() + if !haveChanges { + return + } + // The diff headers for this layer go _before_ those of + // layers that inherited some of its contents. + diffHeaderSet = append([][]*tar.Header{layerChanges}, diffHeaderSet...) + } + expectedCheckDirectory := newCheckDirectoryDefaults() + for _, diffHeaders := range diffHeaderSet { + expectedCheckDirectory.headers(diffHeaders) + } + // Scan the directory tree under the mount point. + var idmap *idtools.IDMappings + if !s.canUseShifting(layer.UIDMap, layer.GIDMap) { + // we would have had to chown() layer contents to match ID maps + idmap = idtools.NewIDMappingsFromMaps(layer.UIDMap, layer.GIDMap) + } + actualCheckDirectory, err := newCheckDirectoryFromDirectory(mountPoint) + if err != nil { + err := fmt.Errorf("scanning contents of %slayer %s: %w", readWriteDesc, id, err) + if isReadWrite { + report.Layers[id] = append(report.Layers[id], err) + } else { + report.ROLayers[id] = append(report.ROLayers[id], err) + } + return + } + // Every departure from our expectations is an error. + diffs := compareCheckDirectory(expectedCheckDirectory, actualCheckDirectory, idmap, ignore) + for _, diff := range diffs { + err := fmt.Errorf("%slayer %s: %s, %w", readWriteDesc, id, diff, ErrLayerContentModified) + if isReadWrite { + report.Layers[id] = append(report.Layers[id], err) + } else { + report.ROLayers[id] = append(report.ROLayers[id], err) + } + } + }() + } + } + // Check that we don't have any dangling parent layer references. + for id, parent := range report.layerParentsByLayerID { + // If this layer doesn't have a parent, no problem. + if parent == "" { + continue + } + // If we've already seen a layer with this parent ID, skip it. + if _, checked := referencedLayers[parent]; checked { + continue + } + if _, checked := referencedROLayers[parent]; checked { + continue + } + // We haven't seen a layer with the ID that this layer's record + // says is its parent's ID. + err := fmt.Errorf("%slayer %s: %w", readWriteDesc, parent, ErrLayerMissing) + report.Layers[id] = append(report.Layers[id], err) + } + return struct{}{}, false, nil + }); err != nil { + return CheckReport{}, err + } + + // This map will track examined images. If we have multiple stores, read-only ones can + // contain copies of images that are also in the read-write store, or the read-write store + // may contain a duplicate entry that refers to layers in the read-only stores, but when + // trying to export them, we only look at the first copy of the image. + examinedImages := make(map[string]struct{}) + + // Walk the list of image stores, looking at each image that we didn't see in a + // previously-visited store. + if _, _, err := readAllImageStores(s, func(store roImageStore) (struct{}, bool, error) { + images, err := store.Images() + if err != nil { + return struct{}{}, true, err + } + isReadWrite := roImageStoreIsReallyReadWrite(store) + readWriteDesc := "" + if !isReadWrite { + readWriteDesc = "read-only " + } + // Examine each image in turn. + for i := range images { + image := images[i] + id := image.ID + // If we've already seen an image with this ID, skip it. 
+ if _, checked := examinedImages[id]; checked { + continue + } + examinedImages[id] = struct{}{} + logrus.Debugf("checking %simage %s", readWriteDesc, id) + if options.ImageData { + // Check that all of the big data items are present and reading them + // back gives us the right amount of data. Even though we record + // digests that can be used to look them up, we don't know how they + // were calculated (they're only used as lookup keys), so do not try + // to check them. + for _, key := range image.BigDataNames { + func() { + data, err := store.BigData(id, key) + if err != nil { + if errors.Is(err, os.ErrNotExist) { + err = fmt.Errorf("%simage %s: data item %q: %w", readWriteDesc, id, key, ErrImageDataMissing) + if isReadWrite { + report.Images[id] = append(report.Images[id], err) + } else { + report.ROImages[id] = append(report.ROImages[id], err) + } + return + } + err = fmt.Errorf("%simage %s: data item %q: %w", readWriteDesc, id, key, err) + if isReadWrite { + report.Images[id] = append(report.Images[id], err) + } else { + report.ROImages[id] = append(report.ROImages[id], err) + } + return + } + if int64(len(data)) != image.BigDataSizes[key] { + err = fmt.Errorf("%simage %s: data item %q: %w", readWriteDesc, id, key, ErrImageDataIncorrectSize) + if isReadWrite { + report.Images[id] = append(report.Images[id], err) + } else { + report.ROImages[id] = append(report.ROImages[id], err) + } + return + } + }() + } + } + // Walk the layers list for the image. For every layer that the image uses + // that has errors, the layer's errors are also the image's errors. + examinedImageLayers := make(map[string]struct{}) + for _, topLayer := range append([]string{image.TopLayer}, image.MappedTopLayers...) { + if topLayer == "" { + continue + } + if _, checked := examinedImageLayers[topLayer]; checked { + continue + } + examinedImageLayers[topLayer] = struct{}{} + for layer := topLayer; layer != ""; layer = report.layerParentsByLayerID[layer] { + // The referenced layer should have a corresponding entry in + // one map or the other. + _, checked := referencedLayers[layer] + _, checkedRO := referencedROLayers[layer] + if !checked && !checkedRO { + err := fmt.Errorf("layer %s: %w", layer, ErrImageLayerMissing) + err = fmt.Errorf("%simage %s: %w", readWriteDesc, id, err) + if isReadWrite { + report.Images[id] = append(report.Images[id], err) + } else { + report.ROImages[id] = append(report.ROImages[id], err) + } + } else { + // Count this layer as referenced. Whether by the + // image or one of its child layers doesn't matter + // at this point. + if _, ok := referencedLayers[layer]; ok { + referencedLayers[layer] = true + } + if _, ok := referencedROLayers[layer]; ok { + referencedROLayers[layer] = true + } + } + if isReadWrite { + if len(report.Layers[layer]) > 0 { + report.Images[id] = append(report.Images[id], report.Layers[layer]...) + } + if len(report.ROLayers[layer]) > 0 { + report.Images[id] = append(report.Images[id], report.ROLayers[layer]...) + } + } else { + if len(report.Layers[layer]) > 0 { + report.ROImages[id] = append(report.ROImages[id], report.Layers[layer]...) + } + if len(report.ROLayers[layer]) > 0 { + report.ROImages[id] = append(report.ROImages[id], report.ROLayers[layer]...) + } + } + } + } + } + return struct{}{}, false, nil + }); err != nil { + return CheckReport{}, err + } + + // Iterate over each container in turn. 
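+	// (Editor's note: container checking mirrors the image checks above: big
+	// data items are read back and size-checked when options.ContainerData is
+	// set, errors already recorded for the base image are copied onto the
+	// container, and the container's read-write layer is marked as referenced.)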
+	if _, _, err := readContainerStore(s, func() (struct{}, bool, error) {
+		containers, err := s.containerStore.Containers()
+		if err != nil {
+			return struct{}{}, true, err
+		}
+		for i := range containers {
+			container := containers[i]
+			id := container.ID
+			logrus.Debugf("checking container %s", id)
+			if options.ContainerData {
+				// Check that all of the big data items are present and reading them
+				// back gives us the right amount of data.
+				for _, key := range container.BigDataNames {
+					func() {
+						data, err := s.containerStore.BigData(id, key)
+						if err != nil {
+							if errors.Is(err, os.ErrNotExist) {
+								err = fmt.Errorf("container %s: data item %q: %w", id, key, ErrContainerDataMissing)
+								report.Containers[id] = append(report.Containers[id], err)
+								return
+							}
+							err = fmt.Errorf("container %s: data item %q: %w", id, key, err)
+							report.Containers[id] = append(report.Containers[id], err)
+							return
+						}
+						if int64(len(data)) != container.BigDataSizes[key] {
+							err = fmt.Errorf("container %s: data item %q: %w", id, key, ErrContainerDataIncorrectSize)
+							report.Containers[id] = append(report.Containers[id], err)
+							return
+						}
+					}()
+				}
+			}
+			// Look at the container's base image. If the image has errors, the image's errors
+			// are the container's errors.
+			if container.ImageID != "" {
+				if _, checked := examinedImages[container.ImageID]; !checked {
+					err := fmt.Errorf("image %s: %w", container.ImageID, ErrContainerImageMissing)
+					report.Containers[id] = append(report.Containers[id], err)
+				}
+				if len(report.Images[container.ImageID]) > 0 {
+					report.Containers[id] = append(report.Containers[id], report.Images[container.ImageID]...)
+				}
+				if len(report.ROImages[container.ImageID]) > 0 {
+					report.Containers[id] = append(report.Containers[id], report.ROImages[container.ImageID]...)
+				}
+			}
+			// Count the container's layer as referenced.
+			if container.LayerID != "" {
+				referencedLayers[container.LayerID] = true
+			}
+		}
+		return struct{}{}, false, nil
+	}); err != nil {
+		return CheckReport{}, err
+	}
+
+	// Now go back through all of the layer stores, and flag any layers which don't belong
+	// to an image or a container, and have been around longer than we can reasonably expect
+	// such a layer to be present before a corresponding image record is added.
+	if _, _, err := readAllLayerStores(s, func(store roLayerStore) (struct{}, bool, error) {
+		if isReadWrite := roLayerStoreIsReallyReadWrite(store); !isReadWrite {
+			return struct{}{}, false, nil
+		}
+		layers, err := store.Layers()
+		if err != nil {
+			return struct{}{}, true, err
+		}
+		for _, layer := range layers {
+			maximumAge := defaultMaximumUnreferencedLayerAge
+			if options.LayerUnreferencedMaximumAge != nil {
+				maximumAge = *options.LayerUnreferencedMaximumAge
+			}
+			if referenced := referencedLayers[layer.ID]; !referenced {
+				if layer.Created.IsZero() || layer.Created.Add(maximumAge).Before(time.Now()) {
+					// Either we don't (and never will) know when this layer was
+					// created, or it was created far enough in the past that we're
+					// reasonably sure it's not part of an image that's being written
+					// right now.
+					err := fmt.Errorf("layer %s: %w", layer.ID, ErrLayerUnreferenced)
+					report.Layers[layer.ID] = append(report.Layers[layer.ID], err)
+				}
+			}
+		}
+		return struct{}{}, false, nil
+	}); err != nil {
+		return CheckReport{}, err
+	}
+
+	// If the driver can tell us about which layers it knows about, we should have previously
+	// examined all of them. Any that we didn't are probably just wasted space.
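+	// (Editor's illustrative sketch, not upstream text: a caller who wants a
+	// stricter cutoff for the unreferenced-layer check above could write,
+	// assuming it holds a Store value s that exposes Check:
+	//
+	//	age := 30 * time.Minute
+	//	opts := storage.CheckMost()
+	//	opts.LayerUnreferencedMaximumAge = &age
+	//	report, err := s.Check(opts)
+	//
+	// CheckMost and LayerUnreferencedMaximumAge are defined in this file.)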
+	// Note: if the driver doesn't support enumerating layers, it returns ErrNotSupported.
+	if err := s.startUsingGraphDriver(); err != nil {
+		return CheckReport{}, err
+	}
+	defer s.stopUsingGraphDriver()
+	layerList, err := s.graphDriver.ListLayers()
+	if err != nil && !errors.Is(err, drivers.ErrNotSupported) {
+		return CheckReport{}, err
+	}
+	if !errors.Is(err, drivers.ErrNotSupported) {
+		for i, id := range layerList {
+			if _, known := referencedLayers[id]; !known {
+				err := fmt.Errorf("layer %s: %w", id, ErrLayerUnaccounted)
+				report.Layers[id] = append(report.Layers[id], err)
+			}
+			report.layerOrder[id] = i + 1
+		}
+	}
+
+	return report, nil
+}
+
+func roLayerStoreIsReallyReadWrite(store roLayerStore) bool {
+	return store.(*layerStore).lockfile.IsReadWrite()
+}
+
+func roImageStoreIsReallyReadWrite(store roImageStore) bool {
+	return store.(*imageStore).lockfile.IsReadWrite()
+}
+
+// Repair removes items which are themselves damaged, or which depend on items which are damaged.
+// Errors are returned if an attempt to delete an item fails.
+func (s *store) Repair(report CheckReport, options *RepairOptions) []error {
+	if options == nil {
+		options = RepairEverything()
+	}
+	var errs []error
+	// Just delete damaged containers.
+	if options.RemoveContainers {
+		for id := range report.Containers {
+			err := s.DeleteContainer(id)
+			if err != nil && !errors.Is(err, ErrContainerUnknown) {
+				err := fmt.Errorf("deleting container %s: %w", id, err)
+				errs = append(errs, err)
+			}
+		}
+	}
+	// Now delete damaged images. Note which layers were removed as part of removing those images.
+	deletedLayers := make(map[string]struct{})
+	for id := range report.Images {
+		layers, err := s.DeleteImage(id, true)
+		if err != nil {
+			if !errors.Is(err, ErrImageUnknown) && !errors.Is(err, ErrLayerUnknown) {
+				err := fmt.Errorf("deleting image %s: %w", id, err)
+				errs = append(errs, err)
+			}
+		} else {
+			for _, layer := range layers {
+				logrus.Debugf("deleted layer %s", layer)
+				deletedLayers[layer] = struct{}{}
+			}
+			logrus.Debugf("deleted image %s", id)
+		}
+	}
+	// Build a list of the layers that we need to remove, sorted with child layers before
+	// the layers that they are derived from (parents are removed last).
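+	// (Editor's note: for a fully damaged chain base <- mid <- top, the
+	// comparator below yields the removal order [top, mid, base], so no layer
+	// is removed while a child still depends on it; layers known only to the
+	// driver are instead removed in the order recorded in layerOrder.)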
+ layersToDelete := make([]string, 0, len(report.Layers)) + for id := range report.Layers { + layersToDelete = append(layersToDelete, id) + } + depth := func(id string) int { + d := 0 + parent := report.layerParentsByLayerID[id] + for parent != "" { + d++ + parent = report.layerParentsByLayerID[parent] + } + return d + } + isUnaccounted := func(errs []error) bool { + for _, err := range errs { + if errors.Is(err, ErrLayerUnaccounted) { + return true + } + } + return false + } + sort.Slice(layersToDelete, func(i, j int) bool { + // we've not heard of either of them, so remove them in the order the driver suggested + if isUnaccounted(report.Layers[layersToDelete[i]]) && + isUnaccounted(report.Layers[layersToDelete[j]]) && + report.layerOrder[layersToDelete[i]] != 0 && report.layerOrder[layersToDelete[j]] != 0 { + return report.layerOrder[layersToDelete[i]] < report.layerOrder[layersToDelete[j]] + } + // always delete the one we've heard of first + if isUnaccounted(report.Layers[layersToDelete[i]]) && !isUnaccounted(report.Layers[layersToDelete[j]]) { + return false + } + // always delete the one we've heard of first + if !isUnaccounted(report.Layers[layersToDelete[i]]) && isUnaccounted(report.Layers[layersToDelete[j]]) { + return true + } + // we've heard of both of them; the one that's on the end of a longer chain goes first + return depth(layersToDelete[i]) > depth(layersToDelete[j]) // closer-to-a-notional-base layers get removed later + }) + // Now delete the layers that haven't been removed along with images. + for _, id := range layersToDelete { + if _, ok := deletedLayers[id]; ok { + continue + } + for _, reportedErr := range report.Layers[id] { + var err error + // If a layer was unaccounted for, remove it at the storage driver level. + // Otherwise, remove it at the higher level and let the higher level + // logic worry about telling the storage driver to delete the layer. 
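+			// (Editor's note: "unaccounted" layers exist only inside the storage
+			// driver, so the graphDriver.Remove call below is the only handle we
+			// have on them; layers with records instead go through Unmount and
+			// DeleteLayer so the higher-level bookkeeping is cleaned up too.)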
+			if errors.Is(reportedErr, ErrLayerUnaccounted) {
+				if err = s.graphDriver.Remove(id); err != nil {
+					err = fmt.Errorf("deleting storage layer %s: %v", id, err)
+				} else {
+					logrus.Debugf("deleted storage layer %s", id)
+				}
+			} else {
+				var stillMounted bool
+				if stillMounted, err = s.Unmount(id, true); err == nil && !stillMounted {
+					logrus.Debugf("unmounted layer %s", id)
+				} else if err != nil {
+					logrus.Debugf("unmounting layer %s: %v", id, err)
+				} else {
+					logrus.Debugf("layer %s still mounted", id)
+				}
+				if err = s.DeleteLayer(id); err != nil {
+					err = fmt.Errorf("deleting layer %s: %w", id, err)
+				} else {
+					logrus.Debugf("deleted layer %s", id)
+				}
+			}
+			if err != nil && !errors.Is(err, ErrLayerUnknown) && !errors.Is(err, ErrNotALayer) && !errors.Is(err, os.ErrNotExist) {
+				errs = append(errs, err)
+			}
+		}
+	}
+	return errs
+}
+
+// compareFileInfo returns a string summarizing what's different between the two checkFileInfos
+func compareFileInfo(a, b checkFileInfo, idmap *idtools.IDMappings, ignore checkIgnore) string {
+	var comparison []string
+	if a.typeflag != b.typeflag {
+		comparison = append(comparison, fmt.Sprintf("filetype:%v→%v", a.typeflag, b.typeflag))
+	}
+	if idmap != nil && !idmap.Empty() {
+		mappedUID, mappedGID, err := idmap.ToContainer(idtools.IDPair{UID: b.uid, GID: b.gid})
+		if err != nil {
+			return err.Error()
+		}
+		b.uid, b.gid = mappedUID, mappedGID
+	}
+	if a.uid != b.uid && !ignore.ownership {
+		comparison = append(comparison, fmt.Sprintf("uid:%d→%d", a.uid, b.uid))
+	}
+	if a.gid != b.gid && !ignore.ownership {
+		comparison = append(comparison, fmt.Sprintf("gid:%d→%d", a.gid, b.gid))
+	}
+	if a.size != b.size {
+		comparison = append(comparison, fmt.Sprintf("size:%d→%d", a.size, b.size))
+	}
+	if (os.ModeType|os.ModePerm)&a.mode != (os.ModeType|os.ModePerm)&b.mode && !ignore.permissions {
+		comparison = append(comparison, fmt.Sprintf("mode:%04o→%04o", a.mode, b.mode))
+	}
+	if a.mtime != b.mtime && !ignore.timestamps {
+		comparison = append(comparison, fmt.Sprintf("mtime:0x%x→0x%x", a.mtime, b.mtime))
+	}
+	return strings.Join(comparison, ",")
+}
+
+// checkFileInfo is what we care about for files
+type checkFileInfo struct {
+	typeflag byte
+	uid, gid int
+	size     int64
+	mode     os.FileMode
+	mtime    int64 // unix-style whole seconds
+}
+
+// checkDirectory is a node in a filesystem record, possibly the top
+type checkDirectory struct {
+	directory map[string]*checkDirectory // subdirectories
+	file      map[string]checkFileInfo   // non-directories
+	checkFileInfo
+}
+
+// newCheckDirectory creates an empty checkDirectory
+func newCheckDirectory(uid, gid int, size int64, mode os.FileMode, mtime int64) *checkDirectory {
+	return &checkDirectory{
+		directory: make(map[string]*checkDirectory),
+		file:      make(map[string]checkFileInfo),
+		checkFileInfo: checkFileInfo{
+			typeflag: tar.TypeDir,
+			uid:      uid,
+			gid:      gid,
+			size:     size,
+			mode:     mode,
+			mtime:    mtime,
+		},
+	}
+}
+
+// newCheckDirectoryDefaults creates an empty checkDirectory with hardwired defaults for the UID
+// (0), GID (0), size (0) and permissions (0o555)
+func newCheckDirectoryDefaults() *checkDirectory {
+	return newCheckDirectory(0, 0, 0, 0o555, time.Now().Unix())
+}
+
+// newCheckDirectoryFromDirectory creates a checkDirectory for an on-disk directory tree
+func newCheckDirectoryFromDirectory(dir string) (*checkDirectory, error) {
+	cd := newCheckDirectoryDefaults()
+	err := filepath.Walk(dir, func(walkpath string, info os.FileInfo, err error) error {
+		if err != nil && !errors.Is(err, os.ErrNotExist) {
+			return err
+ } + rel, err := filepath.Rel(dir, walkpath) + if err != nil { + return err + } + hdr, err := tar.FileInfoHeader(info, "") // we don't record link targets, so don't bother looking it up + if err != nil { + return err + } + hdr.Name = filepath.ToSlash(rel) + cd.header(hdr) + return nil + }) + if err != nil { + return nil, err + } + return cd, nil +} + +// add adds an item to a checkDirectory +func (c *checkDirectory) add(path string, typeflag byte, uid, gid int, size int64, mode os.FileMode, mtime int64) { + components := strings.Split(path, "/") + if components[len(components)-1] == "" { + components = components[:len(components)-1] + } + if components[0] == "." { + components = components[1:] + } + if typeflag != tar.TypeReg { + size = 0 + } + switch len(components) { + case 0: + c.uid = uid + c.gid = gid + c.mode = mode + c.mtime = mtime + return + case 1: + switch typeflag { + case tar.TypeDir: + delete(c.file, components[0]) + // directory entries are mergers, not replacements + if _, present := c.directory[components[0]]; !present { + c.directory[components[0]] = newCheckDirectory(uid, gid, size, mode, mtime) + } else { + c.directory[components[0]].checkFileInfo = checkFileInfo{ + typeflag: tar.TypeDir, + uid: uid, + gid: gid, + size: size, + mode: mode, + mtime: mtime, + } + } + default: + // treat these as TypeReg items + delete(c.directory, components[0]) + c.file[components[0]] = checkFileInfo{ + typeflag: typeflag, + uid: uid, + gid: gid, + size: size, + mode: mode, + mtime: mtime, + } + case tar.TypeXGlobalHeader: + // ignore, since even though it looks like a valid pathname, it doesn't end + // up on the filesystem + } + return + } + subdirectory := c.directory[components[0]] + if subdirectory == nil { + subdirectory = newCheckDirectory(uid, gid, size, mode, mtime) + c.directory[components[0]] = subdirectory + } + subdirectory.add(strings.Join(components[1:], "/"), typeflag, uid, gid, size, mode, mtime) +} + +// remove removes an item from a checkDirectory +func (c *checkDirectory) remove(path string) { + components := strings.Split(path, "/") + if len(components) == 1 { + delete(c.directory, components[0]) + delete(c.file, components[0]) + return + } + subdirectory := c.directory[components[0]] + if subdirectory != nil { + subdirectory.remove(strings.Join(components[1:], "/")) + } +} + +// header updates a checkDirectory using information from the passed-in header +func (c *checkDirectory) header(hdr *tar.Header) { + name := path.Clean(hdr.Name) + dir, base := path.Split(name) + if strings.HasPrefix(base, archive.WhiteoutPrefix) { + if base == archive.WhiteoutOpaqueDir { + c.remove(path.Clean(dir)) + c.add(path.Clean(dir), tar.TypeDir, hdr.Uid, hdr.Gid, hdr.Size, os.FileMode(hdr.Mode), hdr.ModTime.Unix()) + } else { + c.remove(path.Join(dir, base[len(archive.WhiteoutPrefix):])) + } + } else { + if hdr.Typeflag == tar.TypeLink { + // look up the attributes of the target of the hard link + // n.b. by convention, Linkname is always relative to the + // root directory of the archive, which is not always the + // same as being relative to hdr.Name + directory := c + for _, component := range strings.Split(path.Clean(hdr.Linkname), "/") { + if component == "." || component == ".." 
{ + continue + } + if subdir, ok := directory.directory[component]; ok { + directory = subdir + continue + } + if file, ok := directory.file[component]; ok { + hdr.Typeflag = file.typeflag + hdr.Uid = file.uid + hdr.Gid = file.gid + hdr.Size = file.size + hdr.Mode = int64(file.mode) + hdr.ModTime = time.Unix(file.mtime, 0) + } + break + } + } + c.add(name, hdr.Typeflag, hdr.Uid, hdr.Gid, hdr.Size, os.FileMode(hdr.Mode), hdr.ModTime.Unix()) + } +} + +// headers updates a checkDirectory using information from the passed-in header slice +func (c *checkDirectory) headers(hdrs []*tar.Header) { + hdrs = append([]*tar.Header{}, hdrs...) + // sort the headers from the diff to ensure that whiteouts appear + // before content when they both appear in the same directory, per + // https://github.com/opencontainers/image-spec/blob/main/layer.md#whiteouts + // and that hard links appear after other types of entries + sort.SliceStable(hdrs, func(i, j int) bool { + if hdrs[i].Typeflag != tar.TypeLink && hdrs[j].Typeflag == tar.TypeLink { + return true + } + if hdrs[i].Typeflag == tar.TypeLink && hdrs[j].Typeflag != tar.TypeLink { + return false + } + idir, ifile := path.Split(hdrs[i].Name) + jdir, jfile := path.Split(hdrs[j].Name) + if idir != jdir { + return hdrs[i].Name < hdrs[j].Name + } + if ifile == archive.WhiteoutOpaqueDir { + return true + } + if strings.HasPrefix(ifile, archive.WhiteoutPrefix) && !strings.HasPrefix(jfile, archive.WhiteoutPrefix) { + return true + } + return false + }) + for _, hdr := range hdrs { + c.header(hdr) + } +} + +// names provides a sorted list of the path names in the directory tree +func (c *checkDirectory) names() []string { + names := make([]string, 0, len(c.file)+len(c.directory)) + for name := range c.file { + names = append(names, name) + } + for name, subdirectory := range c.directory { + names = append(names, name+"/") + for _, subname := range subdirectory.names() { + names = append(names, name+"/"+subname) + } + } + return names +} + +// compareCheckSubdirectory walks two subdirectory trees and returns a list of differences +func compareCheckSubdirectory(path string, a, b *checkDirectory, idmap *idtools.IDMappings, ignore checkIgnore) []string { + var diff []string + if a == nil { + a = newCheckDirectoryDefaults() + } + if b == nil { + b = newCheckDirectoryDefaults() + } + for aname, adir := range a.directory { + if bdir, present := b.directory[aname]; !present { + // directory was removed + diff = append(diff, "-"+path+"/"+aname+"/") + diff = append(diff, compareCheckSubdirectory(path+"/"+aname, adir, nil, idmap, ignore)...) + } else { + // directory is in both trees; descend + if attributes := compareFileInfo(adir.checkFileInfo, bdir.checkFileInfo, idmap, ignore); attributes != "" { + diff = append(diff, path+"/"+aname+"("+attributes+")") + } + diff = append(diff, compareCheckSubdirectory(path+"/"+aname, adir, bdir, idmap, ignore)...) + } + } + for bname, bdir := range b.directory { + if _, present := a.directory[bname]; !present { + // directory added + diff = append(diff, "+"+path+"/"+bname+"/") + diff = append(diff, compareCheckSubdirectory(path+"/"+bname, nil, bdir, idmap, ignore)...) 
+		}
+	}
+	for aname, afile := range a.file {
+		if bfile, present := b.file[aname]; !present {
+			// non-directory removed or replaced
+			diff = append(diff, "-"+path+"/"+aname)
+		} else {
+			// item is in both trees; compare
+			if attributes := compareFileInfo(afile, bfile, idmap, ignore); attributes != "" {
+				diff = append(diff, path+"/"+aname+"("+attributes+")")
+			}
+		}
+	}
+	for bname := range b.file {
+		filetype, present := a.file[bname]
+		if !present {
+			// non-directory added or replaced with something else
+			diff = append(diff, "+"+path+"/"+bname)
+			continue
+		}
+		if attributes := compareFileInfo(filetype, b.file[bname], idmap, ignore); attributes != "" {
+			// non-directory replaced with non-directory
+			diff = append(diff, "+"+path+"/"+bname+"("+attributes+")")
+		}
+	}
+	return diff
+}
+
+// compareCheckDirectory walks two directory trees and returns a sorted list of differences
+func compareCheckDirectory(a, b *checkDirectory, idmap *idtools.IDMappings, ignore checkIgnore) []string {
+	diff := compareCheckSubdirectory("", a, b, idmap, ignore)
+	sort.Slice(diff, func(i, j int) bool {
+		if strings.Compare(diff[i][1:], diff[j][1:]) < 0 {
+			return true
+		}
+		if diff[i][0] == '-' {
+			return true
+		}
+		return false
+	})
+	return diff
+}
diff --git a/vendor/github.com/containers/storage/containers.go b/vendor/github.com/containers/storage/containers.go
index 5866b2f98..a7dfb405b 100644
--- a/vendor/github.com/containers/storage/containers.go
+++ b/vendor/github.com/containers/storage/containers.go
@@ -523,13 +523,20 @@ func (r *containerStore) load(lockedForWriting bool) (bool, error) {
// The caller must hold r.inProcessLock for reading (but usually holds it for writing in order to make the desired changes).
func (r *containerStore) save(saveLocations containerLocations) error {
	r.lockfile.AssertLockedForWriting()
+	// This must be done before we write the file, because the process could be terminated
+	// after the file is written but before the lock file is updated.
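+	// (Editor's note: RecordWrite marks the lock file as changed so that other
+	// readers reload; doing it before writing means a crash between the two
+	// steps causes at worst a spurious reload, never a missed update.)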
+ lw, err := r.lockfile.RecordWrite() + if err != nil { + return err + } + r.lastWrite = lw for locationIndex := 0; locationIndex < numContainerLocationIndex; locationIndex++ { location := containerLocationFromIndex(locationIndex) if location&saveLocations == 0 { continue } rpath := r.jsonPath[locationIndex] - if err := os.MkdirAll(filepath.Dir(rpath), 0700); err != nil { + if err := os.MkdirAll(filepath.Dir(rpath), 0o700); err != nil { return err } subsetContainers := make([]*Container, 0, len(r.containers)) @@ -549,15 +556,10 @@ func (r *containerStore) save(saveLocations containerLocations) error { NoSync: true, } } - if err := ioutils.AtomicWriteFileWithOpts(rpath, jdata, 0600, opts); err != nil { + if err := ioutils.AtomicWriteFileWithOpts(rpath, jdata, 0o600, opts); err != nil { return err } } - lw, err := r.lockfile.RecordWrite() - if err != nil { - return err - } - r.lastWrite = lw return nil } @@ -569,12 +571,12 @@ func (r *containerStore) saveFor(modifiedContainer *Container) error { } func newContainerStore(dir string, runDir string, transient bool) (rwContainerStore, error) { - if err := os.MkdirAll(dir, 0700); err != nil { + if err := os.MkdirAll(dir, 0o700); err != nil { return nil, err } volatileDir := dir if transient { - if err := os.MkdirAll(runDir, 0700); err != nil { + if err := os.MkdirAll(runDir, 0o700); err != nil { return nil, err } volatileDir = runDir @@ -926,10 +928,10 @@ func (r *containerStore) SetBigData(id, key string, data []byte) error { if !ok { return ErrContainerUnknown } - if err := os.MkdirAll(r.datadir(c.ID), 0700); err != nil { + if err := os.MkdirAll(r.datadir(c.ID), 0o700); err != nil { return err } - err := ioutils.AtomicWriteFile(r.datapath(c.ID, key), data, 0600) + err := ioutils.AtomicWriteFile(r.datapath(c.ID, key), data, 0o600) if err == nil { save := false if c.BigDataSizes == nil { diff --git a/vendor/github.com/containers/storage/drivers/aufs/aufs.go b/vendor/github.com/containers/storage/drivers/aufs/aufs.go index 301ee24d2..0b1766210 100644 --- a/vendor/github.com/containers/storage/drivers/aufs/aufs.go +++ b/vendor/github.com/containers/storage/drivers/aufs/aufs.go @@ -64,7 +64,7 @@ var ( enableDirperm bool ) -const defaultPerms = os.FileMode(0555) +const defaultPerms = os.FileMode(0o555) func init() { graphdriver.MustRegister("aufs", Init) @@ -87,11 +87,9 @@ type Driver struct { // Init returns a new AUFS driver. // An error is returned if AUFS is not supported. 
func Init(home string, options graphdriver.Options) (graphdriver.Driver, error) { - // Try to load the aufs kernel module if err := supportsAufs(); err != nil { return nil, fmt.Errorf("kernel does not support aufs: %w", graphdriver.ErrNotSupported) - } fsMagic, err := graphdriver.GetFSMagic(home) @@ -145,7 +143,7 @@ func Init(home string, options graphdriver.Options) (graphdriver.Driver, error) // Create the root aufs driver dir and return // if it already exists // If not populate the dir structure - if err := idtools.MkdirAllAs(home, 0700, rootUID, rootGID); err != nil { + if err := idtools.MkdirAllAs(home, 0o700, rootUID, rootGID); err != nil { if os.IsExist(err) { return a, nil } @@ -158,7 +156,7 @@ func Init(home string, options graphdriver.Options) (graphdriver.Driver, error) // Populate the dir structure for _, p := range paths { - if err := idtools.MkdirAllAs(path.Join(home, p), 0700, rootUID, rootGID); err != nil { + if err := idtools.MkdirAllAs(path.Join(home, p), 0o700, rootUID, rootGID); err != nil { return nil, err } } @@ -251,9 +249,21 @@ func (a *Driver) Exists(id string) bool { return true } -// List layers (not including additional image stores) +// ListLayers() returns all of the layers known to the driver. func (a *Driver) ListLayers() ([]string, error) { - return nil, graphdriver.ErrNotSupported + diffsDir := filepath.Join(a.rootPath(), "diff") + entries, err := os.ReadDir(diffsDir) + if err != nil { + return nil, err + } + results := make([]string, 0, len(entries)) + for _, entry := range entries { + if !entry.IsDir() { + continue + } + results = append(results, entry.Name()) + } + return results, nil } // AdditionalImageStores returns additional image stores supported by the driver @@ -278,7 +288,6 @@ func (a *Driver) CreateReadWrite(id, parent string, opts *graphdriver.CreateOpts // Create three folders for each id // mnt, layers, and diff func (a *Driver) Create(id, parent string, opts *graphdriver.CreateOpts) error { - if opts != nil && len(opts.StorageOpt) != 0 { return fmt.Errorf("--storage-opt is not supported for aufs") } diff --git a/vendor/github.com/containers/storage/drivers/btrfs/btrfs.go b/vendor/github.com/containers/storage/drivers/btrfs/btrfs.go index 8452fa189..42d55c1a7 100644 --- a/vendor/github.com/containers/storage/drivers/btrfs/btrfs.go +++ b/vendor/github.com/containers/storage/drivers/btrfs/btrfs.go @@ -42,7 +42,7 @@ import ( "golang.org/x/sys/unix" ) -const defaultPerms = os.FileMode(0555) +const defaultPerms = os.FileMode(0o555) func init() { graphdriver.MustRegister("btrfs", Init) @@ -56,7 +56,6 @@ type btrfsOptions struct { // Init returns a new BTRFS driver. // An error is returned if BTRFS is not supported. 
func Init(home string, options graphdriver.Options) (graphdriver.Driver, error) { - fsMagic, err := graphdriver.GetFSMagic(home) if err != nil { return nil, err @@ -70,7 +69,7 @@ func Init(home string, options graphdriver.Options) (graphdriver.Driver, error) if err != nil { return nil, err } - if err := idtools.MkdirAllAs(home, 0700, rootUID, rootGID); err != nil { + if err := idtools.MkdirAllAs(filepath.Join(home, "subvolumes"), 0o700, rootUID, rootGID); err != nil { return nil, err } @@ -119,7 +118,7 @@ func parseOptions(opt []string) (btrfsOptions, bool, error) { case "btrfs.mountopt": return options, userDiskQuota, fmt.Errorf("btrfs driver does not support mount options") default: - return options, userDiskQuota, fmt.Errorf("unknown option %s", key) + return options, userDiskQuota, fmt.Errorf("unknown option %s (%q)", key, option) } } return options, userDiskQuota, nil @@ -127,7 +126,7 @@ func parseOptions(opt []string) (btrfsOptions, bool, error) { // Driver contains information about the filesystem mounted. type Driver struct { - //root of the file system + // root of the file system home string uidMaps []idtools.IDMap gidMaps []idtools.IDMap @@ -226,7 +225,7 @@ func subvolSnapshot(src, dest, name string) error { var args C.struct_btrfs_ioctl_vol_args_v2 args.fd = C.__s64(getDirFd(srcDir)) - var cs = C.CString(name) + cs := C.CString(name) C.set_name_btrfs_ioctl_vol_args_v2(&args, cs) C.free(unsafe.Pointer(cs)) @@ -479,13 +478,13 @@ func (d *Driver) CreateReadWrite(id, parent string, opts *graphdriver.CreateOpts // Create the filesystem with given id. func (d *Driver) Create(id, parent string, opts *graphdriver.CreateOpts) error { - quotas := path.Join(d.home, "quotas") - subvolumes := path.Join(d.home, "subvolumes") + quotas := d.quotasDir() + subvolumes := d.subvolumesDir() rootUID, rootGID, err := idtools.GetRootUIDGID(d.uidMaps, d.gidMaps) if err != nil { return err } - if err := idtools.MkdirAllAs(subvolumes, 0700, rootUID, rootGID); err != nil { + if err := idtools.MkdirAllAs(subvolumes, 0o700, rootUID, rootGID); err != nil { return err } if parent == "" { @@ -523,10 +522,10 @@ func (d *Driver) Create(id, parent string, opts *graphdriver.CreateOpts) error { if err := d.setStorageSize(path.Join(subvolumes, id), driver); err != nil { return err } - if err := idtools.MkdirAllAs(quotas, 0700, rootUID, rootGID); err != nil { + if err := idtools.MkdirAllAs(quotas, 0o700, rootUID, rootGID); err != nil { return err } - if err := os.WriteFile(path.Join(quotas, id), []byte(fmt.Sprint(driver.options.size)), 0644); err != nil { + if err := os.WriteFile(path.Join(quotas, id), []byte(fmt.Sprint(driver.options.size)), 0o644); err != nil { return err } } @@ -560,7 +559,7 @@ func (d *Driver) parseStorageOpt(storageOpt map[string]string, driver *Driver) e } driver.options.size = uint64(size) default: - return fmt.Errorf("unknown option %s", key) + return fmt.Errorf("unknown option %s (%q)", key, storageOpt) } } @@ -629,18 +628,13 @@ func (d *Driver) Get(id string, options graphdriver.MountOpts) (string, error) { if err != nil { return "", err } - switch len(options.Options) { - case 0: - case 1: - if options.Options[0] == "ro" { + for _, opt := range options.Options { + if opt == "ro" { // ignore "ro" option - break + continue } - fallthrough - default: return "", fmt.Errorf("btrfs driver does not support mount options") } - if !st.IsDir() { return "", fmt.Errorf("%s: not a directory", dir) } @@ -679,9 +673,21 @@ func (d *Driver) Exists(id string) bool { return err == nil } -// List layers (not 
including additional image stores) +// List all of the layers known to the driver. func (d *Driver) ListLayers() ([]string, error) { - return nil, graphdriver.ErrNotSupported + subvolumesDir := filepath.Join(d.home, "subvolumes") + entries, err := os.ReadDir(subvolumesDir) + if err != nil { + return nil, err + } + results := make([]string, 0, len(entries)) + for _, entry := range entries { + if !entry.IsDir() { + continue + } + results = append(results, entry.Name()) + } + return results, nil } // AdditionalImageStores returns additional image stores supported by the driver diff --git a/vendor/github.com/containers/storage/drivers/chown_windows.go b/vendor/github.com/containers/storage/drivers/chown_windows.go index 1845a4e08..06ccf9fa4 100644 --- a/vendor/github.com/containers/storage/drivers/chown_windows.go +++ b/vendor/github.com/containers/storage/drivers/chown_windows.go @@ -10,8 +10,7 @@ import ( "github.com/containers/storage/pkg/idtools" ) -type platformChowner struct { -} +type platformChowner struct{} func newLChowner() *platformChowner { return &platformChowner{} diff --git a/vendor/github.com/containers/storage/drivers/copy/copy_linux.go b/vendor/github.com/containers/storage/drivers/copy/copy_linux.go index aa88c1a74..9c3d7c668 100644 --- a/vendor/github.com/containers/storage/drivers/copy/copy_linux.go +++ b/vendor/github.com/containers/storage/drivers/copy/copy_linux.go @@ -11,6 +11,7 @@ package copy #endif */ import "C" + import ( "container/list" "errors" diff --git a/vendor/github.com/containers/storage/drivers/counter.go b/vendor/github.com/containers/storage/drivers/counter.go index 015766676..964dcaf2f 100644 --- a/vendor/github.com/containers/storage/drivers/counter.go +++ b/vendor/github.com/containers/storage/drivers/counter.go @@ -53,7 +53,7 @@ func (c *RefCounter) incdec(path string, infoOp func(minfo *minfo)) int { } } else if !c.checker.IsMounted(path) { // if the unmount was performed outside of this process (e.g. conmon cleanup) - //the ref counter would lose track of it. Check if it is still mounted. + // the ref counter would lose track of it. Check if it is still mounted. 
m.count = 0 } infoOp(m) diff --git a/vendor/github.com/containers/storage/drivers/devmapper/device_setup.go b/vendor/github.com/containers/storage/drivers/devmapper/device_setup.go index 56c117d1b..388602b63 100644 --- a/vendor/github.com/containers/storage/drivers/devmapper/device_setup.go +++ b/vendor/github.com/containers/storage/drivers/devmapper/device_setup.go @@ -177,7 +177,7 @@ func writeLVMConfig(root string, cfg directLVMConfig) error { if err != nil { return fmt.Errorf("marshalling direct lvm config: %w", err) } - if err := os.WriteFile(p, b, 0600); err != nil { + if err := os.WriteFile(p, b, 0o600); err != nil { return fmt.Errorf("writing direct lvm config to file: %w", err) } return nil @@ -193,7 +193,7 @@ func setupDirectLVM(cfg directLVMConfig) error { } } - err := os.MkdirAll(lvmProfileDir, 0755) + err := os.MkdirAll(lvmProfileDir, 0o755) if err != nil { return fmt.Errorf("creating lvm profile directory: %w", err) } @@ -241,7 +241,7 @@ func setupDirectLVM(cfg directLVMConfig) error { } profile := fmt.Sprintf("activation{\nthin_pool_autoextend_threshold=%d\nthin_pool_autoextend_percent=%d\n}", cfg.AutoExtendThreshold, cfg.AutoExtendPercent) - err = os.WriteFile(lvmProfileDir+"/storage-thinpool.profile", []byte(profile), 0600) + err = os.WriteFile(lvmProfileDir+"/storage-thinpool.profile", []byte(profile), 0o600) if err != nil { return fmt.Errorf("writing storage thinp autoextend profile: %w", err) } diff --git a/vendor/github.com/containers/storage/drivers/devmapper/deviceset.go b/vendor/github.com/containers/storage/drivers/devmapper/deviceset.go index d2d0effc3..5d8df8a78 100644 --- a/vendor/github.com/containers/storage/drivers/devmapper/deviceset.go +++ b/vendor/github.com/containers/storage/drivers/devmapper/deviceset.go @@ -124,7 +124,7 @@ type DeviceSet struct { deletionWorkerTicker *time.Ticker uidMaps []idtools.IDMap gidMaps []idtools.IDMap - minFreeSpacePercent uint32 //min free space percentage in thinpool + minFreeSpacePercent uint32 // min free space percentage in thinpool xfsNospaceRetries string // max retries when xfs receives ENOSPC lvmSetupConfig directLVMConfig } @@ -273,7 +273,7 @@ func (devices *DeviceSet) ensureImage(name string, size int64) (string, error) { if err != nil { return "", err } - if err := idtools.MkdirAllAs(dirname, 0700, uid, gid); err != nil { + if err := idtools.MkdirAllAs(dirname, 0o700, uid, gid); err != nil { return "", err } @@ -282,7 +282,7 @@ func (devices *DeviceSet) ensureImage(name string, size int64) (string, error) { return "", err } logrus.Debugf("devmapper: Creating loopback file %s for device-manage use", filename) - file, err := os.OpenFile(filename, os.O_RDWR|os.O_CREATE, 0600) + file, err := os.OpenFile(filename, os.O_RDWR|os.O_CREATE, 0o600) if err != nil { return "", err } @@ -293,7 +293,7 @@ func (devices *DeviceSet) ensureImage(name string, size int64) (string, error) { } } else { if fi.Size() < size { - file, err := os.OpenFile(filename, os.O_RDWR|os.O_CREATE, 0600) + file, err := os.OpenFile(filename, os.O_RDWR|os.O_CREATE, 0o600) if err != nil { return "", err } @@ -421,7 +421,6 @@ func (devices *DeviceSet) constructDeviceIDMap() { } func (devices *DeviceSet) deviceFileWalkFunction(path string, name string) error { - // Skip some of the meta files which are not device files. 
if strings.HasSuffix(name, ".migrated") { logrus.Debugf("devmapper: Skipping file %s", path) @@ -458,7 +457,7 @@ func (devices *DeviceSet) loadDeviceFilesOnStart() error { logrus.Debug("devmapper: loadDeviceFilesOnStart()") defer logrus.Debug("devmapper: loadDeviceFilesOnStart() END") - var scan = func(path string, d fs.DirEntry, err error) error { + scan := func(path string, d fs.DirEntry, err error) error { if err != nil { logrus.Debugf("devmapper: Can't walk the file %s: %v", path, err) return nil @@ -1001,6 +1000,10 @@ func (devices *DeviceSet) verifyBaseDeviceUUIDFS(baseInfo *devInfo) error { devices.Lock() defer devices.Unlock() + if devices.filesystem == "" { + devices.filesystem = determineDefaultFS() + } + if err := devices.activateDeviceIfNeeded(baseInfo, false); err != nil { return err } @@ -1152,7 +1155,6 @@ func (devices *DeviceSet) setupVerifyBaseImageUUIDFS(baseInfo *devInfo) error { } func (devices *DeviceSet) checkGrowBaseDeviceFS(info *devInfo) error { - if !userBaseSize { return nil } @@ -1191,7 +1193,7 @@ func (devices *DeviceSet) growFS(info *devInfo) error { fsMountPoint := "/run/containers/storage/mnt" if _, err := os.Stat(fsMountPoint); os.IsNotExist(err) { - if err := os.MkdirAll(fsMountPoint, 0700); err != nil { + if err := os.MkdirAll(fsMountPoint, 0o700); err != nil { return err } defer os.RemoveAll(fsMountPoint) @@ -1657,7 +1659,6 @@ func (devices *DeviceSet) loadThinPoolLoopBackInfo() error { } func (devices *DeviceSet) enableDeferredRemovalDeletion() error { - // If user asked for deferred removal then check both libdm library // and kernel driver support deferred removal otherwise error out. if enableDeferredRemoval { @@ -1695,16 +1696,19 @@ func (devices *DeviceSet) initDevmapper(doInit bool) (retErr error) { } } - //create the root dir of the devmapper driver ownership to match this - //daemon's remapped root uid/gid so containers can start properly + // create the root dir of the devmapper driver ownership to match this + // daemon's remapped root uid/gid so containers can start properly uid, gid, err := idtools.GetRootUIDGID(devices.uidMaps, devices.gidMaps) if err != nil { return err } - if err := idtools.MkdirAs(devices.root, 0700, uid, gid); err != nil { + if err := idtools.MkdirAs(devices.root, 0o700, uid, gid); err != nil { + return err + } + if err := os.MkdirAll(devices.metadataDir(), 0o700); err != nil { return err } - if err := os.MkdirAll(devices.metadataDir(), 0700); err != nil { + if err := idtools.MkdirAs(filepath.Join(devices.root, "mnt"), 0o700, uid, gid); err != nil && !errors.Is(err, os.ErrExist) { return err } @@ -1811,7 +1815,7 @@ func (devices *DeviceSet) initDevmapper(doInit bool) (retErr error) { devices.dataLoopFile = data devices.dataDevice = dataFile.Name() } else { - dataFile, err = os.OpenFile(devices.dataDevice, os.O_RDWR, 0600) + dataFile, err = os.OpenFile(devices.dataDevice, os.O_RDWR, 0o600) if err != nil { return err } @@ -1844,7 +1848,7 @@ func (devices *DeviceSet) initDevmapper(doInit bool) (retErr error) { devices.metadataLoopFile = metadata devices.metadataDevice = metadataFile.Name() } else { - metadataFile, err = os.OpenFile(devices.metadataDevice, os.O_RDWR, 0600) + metadataFile, err = os.OpenFile(devices.metadataDevice, os.O_RDWR, 0o600) if err != nil { return err } @@ -1966,7 +1970,6 @@ func (devices *DeviceSet) AddDevice(hash, baseHash string, storageOpt map[string } func (devices *DeviceSet) parseStorageOpt(storageOpt map[string]string) (uint64, error) { - // Read size to change the block device size per 
container. for key, val := range storageOpt { key := strings.ToLower(key) @@ -2317,7 +2320,7 @@ func (devices *DeviceSet) Shutdown(home string) error { info.lock.Lock() devices.Lock() if err := devices.deactivateDevice(info); err != nil { - logrus.Debugf("devmapper: Shutdown deactivate base , error: %s", err) + logrus.Debugf("devmapper: Shutdown deactivate base, error: %s", err) } devices.Unlock() info.lock.Unlock() @@ -2326,7 +2329,7 @@ func (devices *DeviceSet) Shutdown(home string) error { devices.Lock() if devices.thinPoolDevice == "" { if err := devices.deactivatePool(); err != nil { - logrus.Debugf("devmapper: Shutdown deactivate pool , error: %s", err) + logrus.Debugf("devmapper: Shutdown deactivate pool, error: %s", err) } } devices.Unlock() @@ -2483,6 +2486,26 @@ func (devices *DeviceSet) List() []string { return ids } +// ListLayers returns a list of device IDs, omitting the ""/"base" device and +// any which have been marked as deleted. +func (devices *DeviceSet) ListLayers() ([]string, error) { + if err := devices.cleanupDeletedDevices(); err != nil { + return nil, err + } + + devices.Lock() + defer devices.Unlock() + + ids := make([]string, 0, len(devices.Devices)) + for k, d := range devices.Devices { + if k == "" || d.Deleted { + continue + } + ids = append(ids, k) + } + return ids, nil +} + func (devices *DeviceSet) deviceStatus(devName string) (sizeInSectors, mappedSectors, highestMappedSector uint64, err error) { var params string _, sizeInSectors, _, params, err = devicemapper.GetStatus(devName) @@ -2520,7 +2543,6 @@ func (devices *DeviceSet) GetDeviceStatus(hash string) (*DevStatus, error) { } sizeInSectors, mappedSectors, highestMappedSector, err := devices.deviceStatus(info.DevName()) - if err != nil { return nil, err } diff --git a/vendor/github.com/containers/storage/drivers/devmapper/driver.go b/vendor/github.com/containers/storage/drivers/devmapper/driver.go index 8b3ee51df..8b8a1d177 100644 --- a/vendor/github.com/containers/storage/drivers/devmapper/driver.go +++ b/vendor/github.com/containers/storage/drivers/devmapper/driver.go @@ -20,7 +20,7 @@ import ( "golang.org/x/sys/unix" ) -const defaultPerms = os.FileMode(0555) +const defaultPerms = os.FileMode(0o555) func init() { graphdriver.MustRegister("devicemapper", Init) @@ -55,7 +55,6 @@ func Init(home string, options graphdriver.Options) (graphdriver.Driver, error) ctr: graphdriver.NewRefCounter(graphdriver.NewDefaultChecker()), locker: locker.New(), } - return graphdriver.NewNaiveDiffDriver(d, graphdriver.NewNaiveLayerIDMapUpdater(d)), nil } @@ -103,7 +102,6 @@ func (d *Driver) Status() [][2]string { // Metadata returns a map of information about the device. 
func (d *Driver) Metadata(id string) (map[string]string, error) {
	m, err := d.DeviceSet.exportDeviceMetadata(id)
-
	if err != nil {
		return nil, err
	}
@@ -202,11 +200,11 @@ func (d *Driver) Get(id string, options graphdriver.MountOpts) (string, error) {
	}

	// Create the target directories if they don't exist
-	if err := idtools.MkdirAllAs(path.Join(d.home, "mnt"), 0755, uid, gid); err != nil {
+	if err := idtools.MkdirAllAs(path.Join(d.home, "mnt"), 0o755, uid, gid); err != nil {
		d.ctr.Decrement(mp)
		return "", err
	}
-	if err := idtools.MkdirAs(mp, 0755, uid, gid); err != nil && !os.IsExist(err) {
+	if err := idtools.MkdirAs(mp, 0o755, uid, gid); err != nil && !os.IsExist(err) {
		d.ctr.Decrement(mp)
		return "", err
	}
@@ -227,7 +225,7 @@ func (d *Driver) Get(id string, options graphdriver.MountOpts) (string, error) {
	if _, err := os.Stat(idFile); err != nil && os.IsNotExist(err) {
		// Create an "id" file with the container/image id in it to help reconstruct this in case
		// of later problems
-		if err := os.WriteFile(idFile, []byte(id), 0600); err != nil {
+		if err := os.WriteFile(idFile, []byte(id), 0o600); err != nil {
			d.ctr.Decrement(mp)
			d.DeviceSet.UnmountDevice(id, mp)
			return "", err
@@ -267,11 +265,6 @@ func (d *Driver) Exists(id string) bool {
	return d.DeviceSet.HasDevice(id)
}

-// List layers (not including additional image stores)
-func (d *Driver) ListLayers() ([]string, error) {
-	return nil, graphdriver.ErrNotSupported
-}
-
// AdditionalImageStores returns additional image stores supported by the driver
func (d *Driver) AdditionalImageStores() []string {
	return nil
diff --git a/vendor/github.com/containers/storage/drivers/driver.go b/vendor/github.com/containers/storage/drivers/driver.go
index 055d99d18..f7b0d6891 100644
--- a/vendor/github.com/containers/storage/drivers/driver.go
+++ b/vendor/github.com/containers/storage/drivers/driver.go
@@ -111,6 +111,10 @@ type ProtoDriver interface {
	Exists(id string) bool
	// Returns a list of layer ids that exist on this driver (does not include
	// additional storage layers). Not supported by all backends.
+	// If the driver requires that layers be removed in a particular order,
+	// usually due to parent-child relationships that it cares about, the
+	// list should be sorted well enough so that if all layers need to be
+	// removed, they can be removed in the order in which they're returned.
	ListLayers() ([]string, error)
	// Status returns a set of key-value pairs which give low
	// level diagnostic status about this driver.
@@ -183,6 +187,8 @@ type DriverWithDifferOutput struct {
	UncompressedDigest digest.Digest
	Metadata           string
	BigData            map[string][]byte
+	TarSplit           []byte
+	TOCDigest          digest.Digest
}

// Differ defines the interface for using a custom differ.
@@ -322,6 +328,7 @@ func getBuiltinDriver(name, home string, options Options) (Driver, error) { type Options struct { Root string RunRoot string + ImageStore string DriverPriority []string DriverOptions []string UIDMaps []idtools.IDMap @@ -337,12 +344,12 @@ func New(name string, config Options) (Driver, error) { } // Guess for prior driver - driversMap := scanPriorDrivers(config.Root) + driversMap := ScanPriorDrivers(config.Root) // use the supplied priority list unless it is empty prioList := config.DriverPriority if len(prioList) == 0 { - prioList = priority + prioList = Priority } for _, name := range prioList { @@ -414,12 +421,12 @@ func isDriverNotSupported(err error) bool { } // scanPriorDrivers returns an un-ordered scan of directories of prior storage drivers -func scanPriorDrivers(root string) map[string]bool { +func ScanPriorDrivers(root string) map[string]bool { driversMap := make(map[string]bool) for driver := range drivers { p := filepath.Join(root, driver) - if _, err := os.Stat(p); err == nil && driver != "vfs" { + if _, err := os.Stat(p); err == nil { driversMap[driver] = true } } diff --git a/vendor/github.com/containers/storage/drivers/driver_darwin.go b/vendor/github.com/containers/storage/drivers/driver_darwin.go index 357851543..b60883a9e 100644 --- a/vendor/github.com/containers/storage/drivers/driver_darwin.go +++ b/vendor/github.com/containers/storage/drivers/driver_darwin.go @@ -1,11 +1,9 @@ package graphdriver -var ( - // Slice of drivers that should be used in order - priority = []string{ - "vfs", - } -) +// Slice of drivers that should be used in order +var Priority = []string{ + "vfs", +} // GetFSMagic returns the filesystem id given the path. func GetFSMagic(rootpath string) (FsMagic, error) { diff --git a/vendor/github.com/containers/storage/drivers/driver_freebsd.go b/vendor/github.com/containers/storage/drivers/driver_freebsd.go index 143cccf92..a6072ab56 100644 --- a/vendor/github.com/containers/storage/drivers/driver_freebsd.go +++ b/vendor/github.com/containers/storage/drivers/driver_freebsd.go @@ -13,7 +13,7 @@ const ( var ( // Slice of drivers that should be used in an order - priority = []string{ + Priority = []string{ "zfs", "vfs", } @@ -31,8 +31,7 @@ func NewDefaultChecker() Checker { return &defaultChecker{} } -type defaultChecker struct { -} +type defaultChecker struct{} func (c *defaultChecker) IsMounted(path string) bool { m, _ := mount.Mounted(path) diff --git a/vendor/github.com/containers/storage/drivers/driver_linux.go b/vendor/github.com/containers/storage/drivers/driver_linux.go index b9e57a60d..cd806b8ff 100644 --- a/vendor/github.com/containers/storage/drivers/driver_linux.go +++ b/vendor/github.com/containers/storage/drivers/driver_linux.go @@ -60,6 +60,8 @@ const ( FsMagicCephFs = FsMagic(0x00C36400) // FsMagicCIFS filesystem id for CIFS FsMagicCIFS = FsMagic(0xFF534D42) + // FsMagicEROFS filesystem id for EROFS + FsMagicEROFS = FsMagic(0xE0F5E1E2) // FsMagicFHGFS filesystem id for FHGFS FsMagicFHGFSFs = FsMagic(0x19830326) // FsMagicIBRIX filesystem id for IBRIX @@ -90,7 +92,7 @@ const ( var ( // Slice of drivers that should be used in an order - priority = []string{ + Priority = []string{ "overlay", // We don't support devicemapper without configuration // "devicemapper", @@ -106,6 +108,7 @@ var ( FsMagicBtrfs: "btrfs", FsMagicCramfs: "cramfs", FsMagicEcryptfs: "ecryptfs", + FsMagicEROFS: "erofs", FsMagicExtfs: "extfs", FsMagicF2fs: "f2fs", FsMagicGPFS: "gpfs", @@ -161,8 +164,7 @@ func NewDefaultChecker() Checker { return 
&defaultChecker{} } -type defaultChecker struct { -} +type defaultChecker struct{} func (c *defaultChecker) IsMounted(path string) bool { m, _ := mount.Mounted(path) diff --git a/vendor/github.com/containers/storage/drivers/driver_solaris.go b/vendor/github.com/containers/storage/drivers/driver_solaris.go index 962edd176..6b6373a37 100644 --- a/vendor/github.com/containers/storage/drivers/driver_solaris.go +++ b/vendor/github.com/containers/storage/drivers/driver_solaris.go @@ -16,6 +16,7 @@ static inline struct statvfs *getstatfs(char *s) { } */ import "C" + import ( "path/filepath" "unsafe" @@ -31,7 +32,7 @@ const ( var ( // Slice of drivers that should be used in an order - priority = []string{ + Priority = []string{ "zfs", } @@ -69,8 +70,7 @@ func NewDefaultChecker() Checker { return &defaultChecker{} } -type defaultChecker struct { -} +type defaultChecker struct{} func (c *defaultChecker) IsMounted(path string) bool { m, _ := mount.Mounted(path) @@ -80,7 +80,6 @@ func (c *defaultChecker) IsMounted(path string) bool { // Mounted checks if the given path is mounted as the fs type // Solaris supports only ZFS for now func Mounted(fsType FsMagic, mountPath string) (bool, error) { - cs := C.CString(filepath.Dir(mountPath)) defer C.free(unsafe.Pointer(cs)) buf := C.getstatfs(cs) diff --git a/vendor/github.com/containers/storage/drivers/driver_unsupported.go b/vendor/github.com/containers/storage/drivers/driver_unsupported.go index 8119d9a6c..7dfbef007 100644 --- a/vendor/github.com/containers/storage/drivers/driver_unsupported.go +++ b/vendor/github.com/containers/storage/drivers/driver_unsupported.go @@ -3,12 +3,10 @@ package graphdriver -var ( - // Slice of drivers that should be used in an order - priority = []string{ - "unsupported", - } -) +// Slice of drivers that should be used in an order +var Priority = []string{ + "unsupported", +} // GetFSMagic returns the filesystem id given the path. func GetFSMagic(rootpath string) (FsMagic, error) { diff --git a/vendor/github.com/containers/storage/drivers/driver_windows.go b/vendor/github.com/containers/storage/drivers/driver_windows.go index ffd30c295..54bd139a3 100644 --- a/vendor/github.com/containers/storage/drivers/driver_windows.go +++ b/vendor/github.com/containers/storage/drivers/driver_windows.go @@ -1,11 +1,9 @@ package graphdriver -var ( - // Slice of drivers that should be used in order - priority = []string{ - "windowsfilter", - } -) +// Slice of drivers that should be used in order +var Priority = []string{ + "windowsfilter", +} // GetFSMagic returns the filesystem id given the path. func GetFSMagic(rootpath string) (FsMagic, error) { diff --git a/vendor/github.com/containers/storage/drivers/fsdiff.go b/vendor/github.com/containers/storage/drivers/fsdiff.go index 6b2496ec5..fba9ec4fc 100644 --- a/vendor/github.com/containers/storage/drivers/fsdiff.go +++ b/vendor/github.com/containers/storage/drivers/fsdiff.go @@ -14,11 +14,9 @@ import ( "github.com/sirupsen/logrus" ) -var ( - // ApplyUncompressedLayer defines the unpack method used by the graph - // driver. - ApplyUncompressedLayer = chrootarchive.ApplyUncompressedLayer -) +// ApplyUncompressedLayer defines the unpack method used by the graph +// driver. 
+var ApplyUncompressedLayer = chrootarchive.ApplyUncompressedLayer // NaiveDiffDriver takes a ProtoDriver and adds the // capability of the Diffing methods which it may or may not @@ -57,6 +55,7 @@ func (gdw *NaiveDiffDriver) Diff(id string, idMappings *idtools.IDMappings, pare options := MountOpts{ MountLabel: mountLabel, + Options: []string{"ro"}, } layerFs, err := driver.Get(id, options) if err != nil { @@ -173,7 +172,7 @@ func (gdw *NaiveDiffDriver) ApplyDiff(id, parent string, options ApplyDiffOpts) } defer driverPut(driver, id, &err) - defaultForceMask := os.FileMode(0700) + defaultForceMask := os.FileMode(0o700) var forceMask *os.FileMode // = nil if runtime.GOOS == "darwin" { forceMask = &defaultForceMask diff --git a/vendor/github.com/containers/storage/drivers/overlay/check.go b/vendor/github.com/containers/storage/drivers/overlay/check.go index 437112742..60980994b 100644 --- a/vendor/github.com/containers/storage/drivers/overlay/check.go +++ b/vendor/github.com/containers/storage/drivers/overlay/check.go @@ -38,22 +38,22 @@ func doesSupportNativeDiff(d, mountOpts string) error { }() // Make directories l1/d, l1/d1, l2/d, l3, work, merged - if err := os.MkdirAll(filepath.Join(td, "l1", "d"), 0755); err != nil { + if err := os.MkdirAll(filepath.Join(td, "l1", "d"), 0o755); err != nil { return err } - if err := os.MkdirAll(filepath.Join(td, "l1", "d1"), 0755); err != nil { + if err := os.MkdirAll(filepath.Join(td, "l1", "d1"), 0o755); err != nil { return err } - if err := os.MkdirAll(filepath.Join(td, "l2", "d"), 0755); err != nil { + if err := os.MkdirAll(filepath.Join(td, "l2", "d"), 0o755); err != nil { return err } - if err := os.Mkdir(filepath.Join(td, "l3"), 0755); err != nil { + if err := os.Mkdir(filepath.Join(td, "l3"), 0o755); err != nil { return err } - if err := os.Mkdir(filepath.Join(td, "work"), 0755); err != nil { + if err := os.Mkdir(filepath.Join(td, "work"), 0o755); err != nil { return err } - if err := os.Mkdir(filepath.Join(td, "merged"), 0755); err != nil { + if err := os.Mkdir(filepath.Join(td, "merged"), 0o755); err != nil { return err } @@ -82,7 +82,7 @@ func doesSupportNativeDiff(d, mountOpts string) error { }() // Touch file in d to force copy up of opaque directory "d" from "l2" to "l3" - if err := os.WriteFile(filepath.Join(td, "merged", "d", "f"), []byte{}, 0644); err != nil { + if err := os.WriteFile(filepath.Join(td, "merged", "d", "f"), []byte{}, 0o644); err != nil { return fmt.Errorf("failed to write to merged directory: %w", err) } @@ -132,19 +132,19 @@ func doesMetacopy(d, mountOpts string) (bool, error) { }() // Make directories l1, l2, work, merged - if err := os.MkdirAll(filepath.Join(td, "l1"), 0755); err != nil { + if err := os.MkdirAll(filepath.Join(td, "l1"), 0o755); err != nil { return false, err } - if err := ioutils.AtomicWriteFile(filepath.Join(td, "l1", "f"), []byte{0xff}, 0700); err != nil { + if err := ioutils.AtomicWriteFile(filepath.Join(td, "l1", "f"), []byte{0xff}, 0o700); err != nil { return false, err } - if err := os.MkdirAll(filepath.Join(td, "l2"), 0755); err != nil { + if err := os.MkdirAll(filepath.Join(td, "l2"), 0o755); err != nil { return false, err } - if err := os.Mkdir(filepath.Join(td, "work"), 0755); err != nil { + if err := os.Mkdir(filepath.Join(td, "work"), 0o755); err != nil { return false, err } - if err := os.Mkdir(filepath.Join(td, "merged"), 0755); err != nil { + if err := os.Mkdir(filepath.Join(td, "merged"), 0o755); err != nil { return false, err } // Mount using the mandatory options and configured 
options @@ -170,7 +170,7 @@ func doesMetacopy(d, mountOpts string) (bool, error) { }() // Make a change that only impacts the inode, and check if the pulled-up copy is marked // as a metadata-only copy - if err := os.Chmod(filepath.Join(td, "merged", "f"), 0600); err != nil { + if err := os.Chmod(filepath.Join(td, "merged", "f"), 0o600); err != nil { return false, fmt.Errorf("changing permissions on file for metacopy check: %w", err) } metacopy, err := system.Lgetxattr(filepath.Join(td, "l2", "f"), archive.GetOverlayXattrName("metacopy")) @@ -196,20 +196,23 @@ func doesVolatile(d string) (bool, error) { } }() - if err := os.MkdirAll(filepath.Join(td, "lower"), 0755); err != nil { + if err := os.MkdirAll(filepath.Join(td, "lower"), 0o755); err != nil { return false, err } - if err := os.MkdirAll(filepath.Join(td, "upper"), 0755); err != nil { + if err := os.MkdirAll(filepath.Join(td, "upper"), 0o755); err != nil { return false, err } - if err := os.Mkdir(filepath.Join(td, "work"), 0755); err != nil { + if err := os.Mkdir(filepath.Join(td, "work"), 0o755); err != nil { return false, err } - if err := os.Mkdir(filepath.Join(td, "merged"), 0755); err != nil { + if err := os.Mkdir(filepath.Join(td, "merged"), 0o755); err != nil { return false, err } // Mount using the mandatory options and configured options opts := fmt.Sprintf("volatile,lowerdir=%s,upperdir=%s,workdir=%s", path.Join(td, "lower"), path.Join(td, "upper"), path.Join(td, "work")) + if unshare.IsRootless() { + opts = fmt.Sprintf("%s,userxattr", opts) + } if err := unix.Mount("overlay", filepath.Join(td, "merged"), "overlay", 0, opts); err != nil { return false, fmt.Errorf("failed to mount overlay for volatile check: %w", err) } @@ -238,11 +241,11 @@ func supportsIdmappedLowerLayers(home string) (bool, error) { upperDir := filepath.Join(layerDir, "upper") workDir := filepath.Join(layerDir, "work") - _ = idtools.MkdirAs(mergedDir, 0700, 0, 0) - _ = idtools.MkdirAs(lowerDir, 0700, 0, 0) - _ = idtools.MkdirAs(lowerMappedDir, 0700, 0, 0) - _ = idtools.MkdirAs(upperDir, 0700, 0, 0) - _ = idtools.MkdirAs(workDir, 0700, 0, 0) + _ = idtools.MkdirAs(mergedDir, 0o700, 0, 0) + _ = idtools.MkdirAs(lowerDir, 0o700, 0, 0) + _ = idtools.MkdirAs(lowerMappedDir, 0o700, 0, 0) + _ = idtools.MkdirAs(upperDir, 0o700, 0, 0) + _ = idtools.MkdirAs(workDir, 0o700, 0, 0) mapping := []idtools.IDMap{ { diff --git a/vendor/github.com/containers/storage/drivers/overlay/mount.go b/vendor/github.com/containers/storage/drivers/overlay/mount.go index de47951d4..33e60b118 100644 --- a/vendor/github.com/containers/storage/drivers/overlay/mount.go +++ b/vendor/github.com/containers/storage/drivers/overlay/mount.go @@ -55,7 +55,7 @@ func mountOverlayFrom(dir, device, target, mType string, flags uintptr, label st w.Close() return fmt.Errorf("mountfrom error on re-exec cmd: %w", err) } - //write the options to the pipe for the untar exec to read + // write the options to the pipe for the untar exec to read if err := json.NewEncoder(w).Encode(options); err != nil { w.Close() return fmt.Errorf("mountfrom json encode to pipe failed: %w", err) diff --git a/vendor/github.com/containers/storage/drivers/overlay/overlay.go b/vendor/github.com/containers/storage/drivers/overlay/overlay.go index b606713f0..1ef7122c5 100644 --- a/vendor/github.com/containers/storage/drivers/overlay/overlay.go +++ b/vendor/github.com/containers/storage/drivers/overlay/overlay.go @@ -29,7 +29,6 @@ import ( "github.com/containers/storage/pkg/idtools" "github.com/containers/storage/pkg/mount" 
"github.com/containers/storage/pkg/parsers" - "github.com/containers/storage/pkg/stringid" "github.com/containers/storage/pkg/system" "github.com/containers/storage/pkg/unshare" units "github.com/docker/go-units" @@ -41,13 +40,13 @@ import ( "golang.org/x/sys/unix" ) -var ( - // untar defines the untar method - untar = chrootarchive.UntarUncompressed -) +// untar defines the untar method +var untar = chrootarchive.UntarUncompressed const ( - defaultPerms = os.FileMode(0555) + defaultPerms = os.FileMode(0o555) + selinuxLabelTest = "system_u:object_r:container_file_t:s0" + mountProgramFlagFile = ".has-mount-program" ) // This backend uses the overlay union filesystem for containers @@ -78,9 +77,10 @@ const ( // that mounts do not fail due to length. const ( - linkDir = "l" - lowerFile = "lower" - maxDepth = 500 + linkDir = "l" + stagingDir = "staging" + lowerFile = "lower" + maxDepth = 500 // idLength represents the number of random characters // which can be used to create the unique link identifier @@ -110,6 +110,7 @@ type Driver struct { name string home string runhome string + imageStore string uidMaps []idtools.IDMap gidMaps []idtools.IDMap ctr *graphdriver.RefCounter @@ -124,7 +125,6 @@ type Driver struct { } type additionalLayerStore struct { - // path is the directory where this store is available on the host. path string @@ -175,11 +175,11 @@ func hasVolatileOption(opts []string) bool { } func getMountProgramFlagFile(path string) string { - return filepath.Join(path, ".has-mount-program") + return filepath.Join(path, mountProgramFlagFile) } func checkSupportVolatile(home, runhome string) (bool, error) { - feature := fmt.Sprintf("volatile") + const feature = "volatile" volatileCacheResult, _, err := cachedFeatureCheck(runhome, feature) var usingVolatile bool if err == nil { @@ -200,6 +200,8 @@ func checkSupportVolatile(home, runhome string) (bool, error) { if err = cachedFeatureRecord(runhome, feature, usingVolatile, ""); err != nil { return false, fmt.Errorf("recording volatile-being-used status: %w", err) } + } else { + usingVolatile = false } } return usingVolatile, nil @@ -303,6 +305,16 @@ func isNetworkFileSystem(fsMagic graphdriver.FsMagic) bool { // If overlay filesystem is not supported on the host, a wrapped graphdriver.ErrNotSupported is returned as error. // If an overlay filesystem is not supported over an existing filesystem then a wrapped graphdriver.ErrIncompatibleFS is returned. func Init(home string, options graphdriver.Options) (graphdriver.Driver, error) { + // If custom --imagestore is selected never + // ditch the original graphRoot, instead add it as + // additionalImageStore so its images can still be + // read and used. 
+ if options.ImageStore != "" { + graphRootAsAdditionalStore := fmt.Sprintf("AdditionalImageStore=%s", options.ImageStore) + options.DriverOptions = append(options.DriverOptions, graphRootAsAdditionalStore) + // complete base name with driver name included + options.ImageStore = filepath.Join(options.ImageStore, "overlay") + } opts, err := parseOptions(options.DriverOptions) if err != nil { return nil, err @@ -325,11 +337,17 @@ func Init(home string, options graphdriver.Options) (graphdriver.Driver, error) } // Create the driver home dir - if err := idtools.MkdirAllAs(path.Join(home, linkDir), 0755, 0, 0); err != nil { + if err := idtools.MkdirAllAs(path.Join(home, linkDir), 0o755, 0, 0); err != nil { return nil, err } - if err := idtools.MkdirAllAs(runhome, 0700, rootUID, rootGID); err != nil { + if options.ImageStore != "" { + if err := idtools.MkdirAllAs(path.Join(options.ImageStore, linkDir), 0o755, 0, 0); err != nil { + return nil, err + } + } + + if err := idtools.MkdirAllAs(runhome, 0o700, rootUID, rootGID); err != nil { return nil, err } @@ -345,12 +363,12 @@ func Init(home string, options graphdriver.Options) (graphdriver.Driver, error) if opts.mountProgram != "" { if unshare.IsRootless() && isNetworkFileSystem(fsMagic) && opts.forceMask == nil { - m := os.FileMode(0700) + m := os.FileMode(0o700) opts.forceMask = &m logrus.Warnf("Network file system detected as backing store. Enforcing overlay option `force_mask=\"%o\"`. Add it to storage.conf to silence this warning", m) } - if err := os.WriteFile(getMountProgramFlagFile(home), []byte("true"), 0600); err != nil { + if err := os.WriteFile(getMountProgramFlagFile(home), []byte("true"), 0o600); err != nil { return nil, err } } else { @@ -420,6 +438,7 @@ func Init(home string, options graphdriver.Options) (graphdriver.Driver, error) d := &Driver{ name: "overlay", home: home, + imageStore: options.ImageStore, runhome: runhome, uidMaps: options.UIDMaps, gidMaps: options.GIDMaps, @@ -560,9 +579,9 @@ func parseOptions(options []string) (*overlayOptions, error) { var mask int64 switch val { case "shared": - mask = 0755 + mask = 0o755 case "private": - mask = 0700 + mask = 0o700 default: mask, err = strconv.ParseInt(val, 8, 32) if err != nil { @@ -627,7 +646,7 @@ func SupportsNativeOverlay(home, runhome string) (bool, error) { if err != nil && !os.IsNotExist(err) { return false, err } - if err := os.WriteFile(getMountProgramFlagFile(home), []byte(fmt.Sprintf("%t", needsMountProgram)), 0600); err != nil && !os.IsNotExist(err) { + if err := os.WriteFile(getMountProgramFlagFile(home), []byte(fmt.Sprintf("%t", needsMountProgram)), 0o600); err != nil && !os.IsNotExist(err) { return false, err } if needsMountProgram { @@ -640,7 +659,7 @@ func SupportsNativeOverlay(home, runhome string) (bool, error) { for _, dir := range []string{home, runhome} { if _, err := os.Stat(dir); err != nil { - _ = idtools.MkdirAllAs(dir, 0700, 0, 0) + _ = idtools.MkdirAllAs(dir, 0o700, 0, 0) } } @@ -700,12 +719,12 @@ func supportsOverlay(home string, homeMagic graphdriver.FsMagic, rootUID, rootGI _ = os.RemoveAll(layerDir) _ = os.Remove(home) }() - _ = idtools.MkdirAs(mergedDir, 0700, rootUID, rootGID) - _ = idtools.MkdirAs(lower1Dir, 0700, rootUID, rootGID) - _ = idtools.MkdirAs(lower2Dir, 0700, rootUID, rootGID) - _ = idtools.MkdirAs(lower2Subdir, 0700, rootUID, rootGID) - _ = idtools.MkdirAs(upperDir, 0700, rootUID, rootGID) - _ = idtools.MkdirAs(workDir, 0700, rootUID, rootGID) + _ = idtools.MkdirAs(mergedDir, 0o700, rootUID, rootGID) + _ = 
idtools.MkdirAs(lower1Dir, 0o700, rootUID, rootGID) + _ = idtools.MkdirAs(lower2Dir, 0o700, rootUID, rootGID) + _ = idtools.MkdirAs(lower2Subdir, 0o700, rootUID, rootGID) + _ = idtools.MkdirAs(upperDir, 0o700, rootUID, rootGID) + _ = idtools.MkdirAs(workDir, 0o700, rootUID, rootGID) f, err := os.Create(lower2SubdirFile) if err != nil { logrus.Debugf("Unable to create test file: %v", err) @@ -723,7 +742,7 @@ func supportsOverlay(home string, homeMagic graphdriver.FsMagic, rootUID, rootGI if unshare.IsRootless() { flags = fmt.Sprintf("%s,userxattr", flags) } - if err := syscall.Mknod(filepath.Join(upperDir, "whiteout"), syscall.S_IFCHR|0600, int(unix.Mkdev(0, 0))); err != nil { + if err := syscall.Mknod(filepath.Join(upperDir, "whiteout"), syscall.S_IFCHR|0o600, int(unix.Mkdev(0, 0))); err != nil { logrus.Debugf("Unable to create kernel-style whiteout: %v", err) return supportsDType, fmt.Errorf("unable to create kernel-style whiteout: %w", err) } @@ -806,15 +825,22 @@ func (d *Driver) Status() [][2]string { // Metadata returns meta data about the overlay driver such as // LowerDir, UpperDir, WorkDir and MergeDir used to store data. func (d *Driver) Metadata(id string) (map[string]string, error) { - dir := d.dir(id) + dir, imagestore, _ := d.dir2(id) if _, err := os.Stat(dir); err != nil { return nil, err } + workDirBase := dir + if imagestore != "" { + if _, err := os.Stat(dir); err != nil { + return nil, err + } + workDirBase = imagestore + } metadata := map[string]string{ - "WorkDir": path.Join(dir, "work"), - "MergedDir": path.Join(dir, "merged"), - "UpperDir": path.Join(dir, "diff"), + "WorkDir": path.Join(workDirBase, "work"), + "MergedDir": path.Join(workDirBase, "merged"), + "UpperDir": path.Join(workDirBase, "diff"), } lowerDirs, err := d.getLowerDirs(id) @@ -929,7 +955,7 @@ func (d *Driver) Create(id, parent string, opts *graphdriver.CreateOpts) (retErr } func (d *Driver) create(id, parent string, opts *graphdriver.CreateOpts, disableQuota bool) (retErr error) { - dir := d.dir(id) + dir, imageStore, _ := d.dir2(id) uidMaps := d.uidMaps gidMaps := d.gidMaps @@ -940,7 +966,7 @@ func (d *Driver) create(id, parent string, opts *graphdriver.CreateOpts, disable } // Make the link directory if it does not exist - if err := idtools.MkdirAllAs(path.Join(d.home, linkDir), 0755, 0, 0); err != nil { + if err := idtools.MkdirAllAs(path.Join(d.home, linkDir), 0o755, 0, 0); err != nil { return err } @@ -954,11 +980,22 @@ func (d *Driver) create(id, parent string, opts *graphdriver.CreateOpts, disable GID: rootGID, } - if err := idtools.MkdirAllAndChownNew(path.Dir(dir), 0755, idPair); err != nil { + if err := idtools.MkdirAllAndChownNew(path.Dir(dir), 0o755, idPair); err != nil { return err } + workDirBase := dir + if imageStore != "" { + workDirBase = imageStore + if err := idtools.MkdirAllAndChownNew(path.Dir(imageStore), 0o755, idPair); err != nil { + return err + } + } if parent != "" { - st, err := system.Stat(d.dir(parent)) + parentBase, parentImageStore, _ := d.dir2(parent) + if parentImageStore != "" { + parentBase = parentImageStore + } + st, err := system.Stat(filepath.Join(parentBase, "diff")) if err != nil { return err } @@ -975,9 +1012,14 @@ func (d *Driver) create(id, parent string, opts *graphdriver.CreateOpts, disable } } - if err := idtools.MkdirAllAndChownNew(dir, 0700, idPair); err != nil { + if err := idtools.MkdirAllAndChownNew(dir, 0o700, idPair); err != nil { return err } + if imageStore != "" { + if err := idtools.MkdirAllAndChownNew(imageStore, 0o700, idPair); err != nil { 
+ return err + } + } defer func() { // Clean up on failure @@ -985,6 +1027,11 @@ func (d *Driver) create(id, parent string, opts *graphdriver.CreateOpts, disable if err2 := os.RemoveAll(dir); err2 != nil { logrus.Errorf("While recovering from a failure creating a layer, error deleting %#v: %v", dir, err2) } + if imageStore != "" { + if err2 := os.RemoveAll(workDirBase); err2 != nil { + logrus.Errorf("While recovering from a failure creating a layer, error deleting %#v: %v", workDirBase, err2) + } + } } }() @@ -1007,44 +1054,60 @@ func (d *Driver) create(id, parent string, opts *graphdriver.CreateOpts, disable if err := d.quotaCtl.SetQuota(dir, quota); err != nil { return err } + if imageStore != "" { + if err := d.quotaCtl.SetQuota(imageStore, quota); err != nil { + return err + } + } } perms := defaultPerms if d.options.forceMask != nil { perms = *d.options.forceMask } + if parent != "" { - st, err := system.Stat(filepath.Join(d.dir(parent), "diff")) + parentDir, parentImageStore, _ := d.dir2(parent) + base := parentDir + if parentImageStore != "" { + base = parentImageStore + } + st, err := system.Stat(filepath.Join(base, "diff")) if err != nil { return err } perms = os.FileMode(st.Mode()) } - if err := idtools.MkdirAs(path.Join(dir, "diff"), perms, rootUID, rootGID); err != nil { + if err := idtools.MkdirAs(path.Join(workDirBase, "diff"), perms, rootUID, rootGID); err != nil { return err } lid := generateID(idLength) - if err := os.Symlink(path.Join("..", id, "diff"), path.Join(d.home, linkDir, lid)); err != nil { + + linkBase := path.Join("..", id, "diff") + if imageStore != "" { + linkBase = path.Join(imageStore, "diff") + } + if err := os.Symlink(linkBase, path.Join(d.home, linkDir, lid)); err != nil { return err } // Write link id to link file - if err := os.WriteFile(path.Join(dir, "link"), []byte(lid), 0644); err != nil { + if err := os.WriteFile(path.Join(dir, "link"), []byte(lid), 0o644); err != nil { return err } - if err := idtools.MkdirAs(path.Join(dir, "work"), 0700, rootUID, rootGID); err != nil { + if err := idtools.MkdirAs(path.Join(workDirBase, "work"), 0o700, rootUID, rootGID); err != nil { return err } - if err := idtools.MkdirAs(path.Join(dir, "merged"), 0700, rootUID, rootGID); err != nil { + if err := idtools.MkdirAs(path.Join(dir, "merged"), 0o700, rootUID, rootGID); err != nil { return err } // if no parent directory, create a dummy lower directory and skip writing a "lowers" file if parent == "" { - return idtools.MkdirAs(path.Join(dir, "empty"), 0700, rootUID, rootGID) + return idtools.MkdirAs(path.Join(dir, "empty"), 0o700, rootUID, rootGID) } lower, err := d.getLower(parent) @@ -1052,7 +1115,7 @@ func (d *Driver) create(id, parent string, opts *graphdriver.CreateOpts, disable return err } if lower != "" { - if err := os.WriteFile(path.Join(dir, lowerFile), []byte(lower), 0666); err != nil { + if err := os.WriteFile(path.Join(dir, lowerFile), []byte(lower), 0o666); err != nil { return err } } @@ -1120,22 +1183,26 @@ func (d *Driver) getLower(parent string) (string, error) { } func (d *Driver) dir(id string) string { - p, _ := d.dir2(id) + p, _, _ := d.dir2(id) return p } -func (d *Driver) dir2(id string) (string, bool) { +func (d *Driver) dir2(id string) (string, string, bool) { newpath := path.Join(d.home, id) + imageStore := "" + if d.imageStore != "" { + imageStore = path.Join(d.imageStore, id) + } if _, err := os.Stat(newpath); err != nil { for _, p := range d.AdditionalImageStores() { l := path.Join(p, d.name, id) _, err = os.Stat(l) if err == nil { - 
return l, true + return l, imageStore, true } } } - return newpath, false + return newpath, imageStore, false } func (d *Driver) getLowerDirs(id string) ([]string, error) { @@ -1223,6 +1290,9 @@ func (d *Driver) Remove(id string) error { } if d.quotaCtl != nil { d.quotaCtl.ClearQuota(dir) + if d.imageStore != "" { + d.quotaCtl.ClearQuota(d.imageStore) + } } return nil } @@ -1240,7 +1310,7 @@ func (d *Driver) recreateSymlinks() error { return fmt.Errorf("reading driver home directory %q: %w", d.home, err) } // This makes the link directory if it doesn't exist - if err := idtools.MkdirAllAs(path.Join(d.home, linkDir), 0755, 0, 0); err != nil { + if err := idtools.MkdirAllAs(path.Join(d.home, linkDir), 0o755, 0, 0); err != nil { return err } // Keep looping as long as we take some corrective action in each iteration @@ -1317,7 +1387,7 @@ func (d *Driver) recreateSymlinks() error { if err != nil || string(data) != link.Name() { // NOTE: If two or more links point to the same target, we will update linkFile // with every value of link.Name(), and set madeProgress = true every time. - if err := os.WriteFile(linkFile, []byte(link.Name()), 0644); err != nil { + if err := os.WriteFile(linkFile, []byte(link.Name()), 0o644); err != nil { errs = multierror.Append(errs, fmt.Errorf("correcting link for layer %s: %w", targetID, err)) continue } @@ -1342,10 +1412,14 @@ func (d *Driver) Get(id string, options graphdriver.MountOpts) (string, error) { } func (d *Driver) get(id string, disableShifting bool, options graphdriver.MountOpts) (_ string, retErr error) { - dir, inAdditionalStore := d.dir2(id) + dir, imageStore, inAdditionalStore := d.dir2(id) if _, err := os.Stat(dir); err != nil { return "", err } + workDirBase := dir + if imageStore != "" { + workDirBase = imageStore + } readWrite := !inAdditionalStore if !d.SupportsShifting() || options.DisableShifting { @@ -1478,18 +1552,18 @@ func (d *Driver) get(id string, disableShifting bool, options graphdriver.MountO absLowers = append(absLowers, path.Join(dir, "empty")) } // user namespace requires this to move a directory from lower to upper. 
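dir2 now returns the layer's home path, its image-store twin, and whether the layer lives in an additional (read-only) store, and every caller in this file then picks a writable base the same way; get() below also starts taking its root UID/GID from the per-mount options. The repeated selection pattern, factored out as a sketch (the helper name is hypothetical):

package overlay

// layerPaths is a sketch of the pattern this patch repeats in
// Metadata, create, get and getDiffPath after dir2 grew a second
// return value.
func (d *Driver) layerPaths(id string) (dir, workDirBase string, inAdditionalStore bool) {
	dir, imageStore, inAdditionalStore := d.dir2(id)
	workDirBase = dir
	if imageStore != "" {
		// Writable per-layer data (diff, work) lives in the image store;
		// bookkeeping (link, lower, merged) stays under the driver home.
		workDirBase = imageStore
	}
	return dir, workDirBase, inAdditionalStore
}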
- rootUID, rootGID, err := idtools.GetRootUIDGID(d.uidMaps, d.gidMaps) + rootUID, rootGID, err := idtools.GetRootUIDGID(options.UidMaps, options.GidMaps) if err != nil { return "", err } - diffDir := path.Join(dir, "diff") + diffDir := path.Join(workDirBase, "diff") if err := idtools.MkdirAllAs(diffDir, perms, rootUID, rootGID); err != nil { return "", err } mergedDir := path.Join(dir, "merged") // Create the driver merged dir - if err := idtools.MkdirAs(mergedDir, 0700, rootUID, rootGID); err != nil && !os.IsExist(err) { + if err := idtools.MkdirAs(mergedDir, 0o700, rootUID, rootGID); err != nil && !os.IsExist(err) { return "", err } if count := d.ctr.Increment(mergedDir); count > 1 { @@ -1505,7 +1579,7 @@ func (d *Driver) get(id string, disableShifting bool, options graphdriver.MountO } }() - workdir := path.Join(dir, "work") + workdir := path.Join(workDirBase, "work") if d.options.mountProgram == "" && unshare.IsRootless() { optsList = append(optsList, "userxattr") @@ -1525,7 +1599,7 @@ func (d *Driver) get(id string, disableShifting bool, options graphdriver.MountO if !disableShifting && len(options.UidMaps) > 0 && len(options.GidMaps) > 0 && d.options.mountProgram == "" { var newAbsDir []string mappedRoot := filepath.Join(d.home, id, "mapped") - if err := os.MkdirAll(mappedRoot, 0700); err != nil { + if err := os.MkdirAll(mappedRoot, 0o700); err != nil { return "", err } @@ -1612,16 +1686,15 @@ func (d *Driver) get(id string, disableShifting bool, options graphdriver.MountO // Use mountFrom when the mount data has exceeded the page size. The mount syscall fails if // the mount data cannot fit within a page and relative links make the mount data much // smaller at the expense of requiring a fork exec to chdir(). - - workdir = path.Join(id, "work") if readWrite { diffDir := path.Join(id, "diff") - opts = fmt.Sprintf("lowerdir=%s,upperdir=%s,workdir=%s", strings.Join(absLowers, ":"), diffDir, workdir) + workDir := path.Join(id, "work") + opts = fmt.Sprintf("lowerdir=%s,upperdir=%s,workdir=%s", strings.Join(absLowers, ":"), diffDir, workDir) } else { opts = fmt.Sprintf("lowerdir=%s:%s", diffDir, strings.Join(absLowers, ":")) } if len(optsList) > 0 { - opts = fmt.Sprintf("%s,%s", opts, strings.Join(optsList, ",")) + opts = strings.Join(append([]string{opts}, optsList...), ",") } mountData = label.FormatMountLabel(opts, options.MountLabel) mountFunc = func(source string, target string, mType string, flags uintptr, label string) error { @@ -1631,9 +1704,9 @@ func (d *Driver) get(id string, disableShifting bool, options graphdriver.MountO } // overlay has a check in place to prevent mounting the same file system twice - // if volatile was already specified. - err = os.RemoveAll(filepath.Join(workdir, "work/incompat/volatile")) - if err != nil && !os.IsNotExist(err) { + // if volatile was already specified. Yes, the kernel repeats the "work" component. 
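Taken together, the get() hunks above settle on one rule for building the mount data string: upperdir/workdir only for writable layers, userxattr appended for rootless mounts, and any extra options joined in a single pass. A condensed sketch, with every argument standing in for a value get() computes; the RemoveAll fix that follows below then clears the kernel's volatile "incompat" marker before remounting:

package overlay

import (
	"fmt"
	"strings"
)

// overlayMountData is a sketch, not the driver's code.
func overlayMountData(absLowers []string, diffDir, workDir string, extraOpts []string, readWrite, rootless bool) string {
	var opts string
	if readWrite {
		opts = fmt.Sprintf("lowerdir=%s,upperdir=%s,workdir=%s",
			strings.Join(absLowers, ":"), diffDir, workDir)
	} else {
		// Read-only: the layer's own diff directory is just another lower.
		opts = fmt.Sprintf("lowerdir=%s:%s", diffDir, strings.Join(absLowers, ":"))
	}
	if rootless {
		// Unprivileged overlay keeps its metadata in user.* xattrs.
		extraOpts = append(extraOpts, "userxattr")
	}
	if len(extraOpts) > 0 {
		opts = strings.Join(append([]string{opts}, extraOpts...), ",")
	}
	return opts
}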
+ err = os.RemoveAll(filepath.Join(workdir, "work", "incompat", "volatile")) + if err != nil && !errors.Is(err, os.ErrNotExist) { return "", err } @@ -1703,11 +1776,13 @@ func (d *Driver) Put(id string) error { if !unmounted { if err := unix.Unmount(mountpoint, unix.MNT_DETACH); err != nil && !os.IsNotExist(err) { logrus.Debugf("Failed to unmount %s overlay: %s - %v", id, mountpoint, err) + return fmt.Errorf("unmounting %q: %w", mountpoint, err) } } if err := unix.Rmdir(mountpoint); err != nil && !os.IsNotExist(err) { logrus.Debugf("Failed to remove mountpoint %s overlay: %s - %v", id, mountpoint, err) + return fmt.Errorf("removing mount point %q: %w", mountpoint, err) } return nil @@ -1725,20 +1800,23 @@ func (d *Driver) ListLayers() ([]string, error) { if err != nil { return nil, err } - layers := make([]string, 0) for _, entry := range entries { id := entry.Name() - // Does it look like a datadir directory? - if !entry.IsDir() || stringid.ValidateID(id) != nil { + switch id { + case linkDir, stagingDir, quota.BackingFsBlockDeviceLink, mountProgramFlagFile: + // expected, but not a layer. skip it continue + default: + // Does it look like a datadir directory? + if !entry.IsDir() { + continue + } + layers = append(layers, id) } - - layers = append(layers, id) } - - return layers, err + return layers, nil } // isParent returns if the passed in parent is the direct parent of the passed in layer @@ -1795,7 +1873,7 @@ func (g *overlayFileGetter) Close() error { } func (d *Driver) getStagingDir() string { - return filepath.Join(d.home, "staging") + return filepath.Join(d.home, stagingDir) } // DiffGetter returns a FileGetCloser that can read files from the directory that @@ -1831,7 +1909,7 @@ func (d *Driver) ApplyDiffWithDiffer(id, parent string, options *graphdriver.App var applyDir string if id == "" { - err := os.MkdirAll(d.getStagingDir(), 0700) + err := os.MkdirAll(d.getStagingDir(), 0o700) if err != nil && !os.IsExist(err) { return graphdriver.DriverWithDifferOutput{}, err } @@ -1874,6 +1952,9 @@ func (d *Driver) ApplyDiffFromStagingDirectory(id, parent, stagingDirectory stri if err := os.RemoveAll(diff); err != nil && !os.IsNotExist(err) { return err } + + diffOutput.UncompressedDigest = diffOutput.TOCDigest + return os.Rename(stagingDirectory, diff) } @@ -1884,7 +1965,6 @@ func (d *Driver) DifferTarget(id string) (string, error) { // ApplyDiff applies the new layer into a root func (d *Driver) ApplyDiff(id, parent string, options graphdriver.ApplyDiffOpts) (size int64, err error) { - if !d.isParent(id, parent) { if d.options.ignoreChownErrors { options.IgnoreChownErrors = d.options.ignoreChownErrors @@ -1922,8 +2002,12 @@ func (d *Driver) ApplyDiff(id, parent string, options graphdriver.ApplyDiffOpts) } func (d *Driver) getDiffPath(id string) (string, error) { - dir := d.dir(id) - return redirectDiffIfAdditionalLayer(path.Join(dir, "diff")) + dir, imagestore, _ := d.dir2(id) + base := dir + if imagestore != "" { + base = imagestore + } + return redirectDiffIfAdditionalLayer(path.Join(base, "diff")) } func (d *Driver) getLowerDiffPaths(id string) ([]string, error) { @@ -2014,8 +2098,12 @@ func (d *Driver) AdditionalImageStores() []string { // by toContainer to those specified by toHost. 
func (d *Driver) UpdateLayerIDMap(id string, toContainer, toHost *idtools.IDMappings, mountLabel string) error { var err error - dir := d.dir(id) - diffDir := filepath.Join(dir, "diff") + dir, imagestore, _ := d.dir2(id) + base := dir + if imagestore != "" { + base = imagestore + } + diffDir := filepath.Join(base, "diff") rootUID, rootGID := 0, 0 if toHost != nil { @@ -2196,7 +2284,7 @@ func (al *additionalLayer) CreateAs(id, parent string) error { } // tell the additional layer store that we use this layer. // mark this layer as "additional layer" - if err := os.WriteFile(path.Join(dir, "additionallayer"), []byte(al.path), 0644); err != nil { + if err := os.WriteFile(path.Join(dir, "additionallayer"), []byte(al.path), 0o644); err != nil { return err } notifyUseAdditionalLayer(al.path) diff --git a/vendor/github.com/containers/storage/drivers/quota/projectquota.go b/vendor/github.com/containers/storage/drivers/quota/projectquota.go index f5484dee7..10ea3c5a5 100644 --- a/vendor/github.com/containers/storage/drivers/quota/projectquota.go +++ b/vendor/github.com/containers/storage/drivers/quota/projectquota.go @@ -50,6 +50,7 @@ struct fsxattr { #endif */ import "C" + import ( "errors" "fmt" @@ -67,6 +68,10 @@ import ( const projectIDsAllocatedPerQuotaHome = 10000 +// BackingFsBlockDeviceLink is the name of a file that we place in +// the home directory of a driver that uses this package. +const BackingFsBlockDeviceLink = "backingFsBlockDev" + // Quota limit params - currently we only control blocks hard limit and inodes type Quota struct { Size uint64 @@ -94,7 +99,6 @@ func generateUniqueProjectID(path string) (uint32, error) { stat, ok := fileinfo.Sys().(*syscall.Stat_t) if !ok { return 0, fmt.Errorf("not a syscall.Stat_t %s", path) - } projectID := projectIDsAllocatedPerQuotaHome + (stat.Ino*projectIDsAllocatedPerQuotaHome)%(math.MaxUint32-projectIDsAllocatedPerQuotaHome) return uint32(projectID), nil @@ -187,7 +191,6 @@ func NewControl(basePath string) (*Control, error) { // SetQuota - assign a unique project id to directory and set the quota limits // for that project id func (q *Control) SetQuota(targetPath string, quota Quota) error { - projectID, ok := q.quotas[targetPath] if !ok { projectID = q.nextProjectID @@ -235,7 +238,7 @@ func (q *Control) setProjectQuota(projectID uint32, quota Quota) error { d.d_ino_softlimit = d.d_ino_hardlimit } - var cs = C.CString(q.backingFsBlockDev) + cs := C.CString(q.backingFsBlockDev) defer C.free(unsafe.Pointer(cs)) runQuotactl := func() syscall.Errno { @@ -303,7 +306,7 @@ func (q *Control) fsDiskQuotaFromPath(targetPath string) (C.fs_disk_quota_t, err // // get the quota limit for the container's project id // - var cs = C.CString(q.backingFsBlockDev) + cs := C.CString(q.backingFsBlockDev) defer C.free(unsafe.Pointer(cs)) _, _, errno := unix.Syscall6(unix.SYS_QUOTACTL, C.Q_XGETPQUOTA, @@ -395,9 +398,9 @@ func openDir(path string) (*C.DIR, error) { Cpath := C.CString(path) defer free(Cpath) - dir := C.opendir(Cpath) + dir, errno := C.opendir(Cpath) if dir == nil { - return nil, fmt.Errorf("can't open dir %v", Cpath) + return nil, fmt.Errorf("can't open dir %v: %w", Cpath, errno) } return dir, nil } @@ -421,11 +424,18 @@ func makeBackingFsDev(home string) (string, error) { return "", err } - backingFsBlockDev := path.Join(home, "backingFsBlockDev") + backingFsBlockDev := path.Join(home, BackingFsBlockDeviceLink) backingFsBlockDevTmp := backingFsBlockDev + ".tmp" // Re-create just in case someone copied the home directory over to a new device - if err 
:= unix.Mknod(backingFsBlockDevTmp, unix.S_IFBLK|0600, int(stat.Dev)); err != nil { - return "", fmt.Errorf("failed to mknod %s: %w", backingFsBlockDevTmp, err) + if err := unix.Mknod(backingFsBlockDevTmp, unix.S_IFBLK|0o600, int(stat.Dev)); err != nil { + if !errors.Is(err, unix.EEXIST) { + return "", fmt.Errorf("failed to mknod %s: %w", backingFsBlockDevTmp, err) + } + // On EEXIST, try again after unlinking any potential leftover. + _ = unix.Unlink(backingFsBlockDevTmp) + if err := unix.Mknod(backingFsBlockDevTmp, unix.S_IFBLK|0o600, int(stat.Dev)); err != nil { + return "", fmt.Errorf("failed to mknod %s: %w", backingFsBlockDevTmp, err) + } } if err := unix.Rename(backingFsBlockDevTmp, backingFsBlockDev); err != nil { return "", fmt.Errorf("failed to rename %s to %s: %w", backingFsBlockDevTmp, backingFsBlockDev, err) diff --git a/vendor/github.com/containers/storage/drivers/quota/projectquota_unsupported.go b/vendor/github.com/containers/storage/drivers/quota/projectquota_unsupported.go index 2f6c7f28f..648fd3379 100644 --- a/vendor/github.com/containers/storage/drivers/quota/projectquota_unsupported.go +++ b/vendor/github.com/containers/storage/drivers/quota/projectquota_unsupported.go @@ -15,8 +15,7 @@ type Quota struct { // Control - Context to be used by storage driver (e.g. overlay) // who wants to apply project quotas to container dirs -type Control struct { -} +type Control struct{} func NewControl(basePath string) (*Control, error) { return nil, errors.New("filesystem does not support, or has not enabled quotas") diff --git a/vendor/github.com/containers/storage/drivers/template.go b/vendor/github.com/containers/storage/drivers/template.go index 7b96c082d..66ab89f7f 100644 --- a/vendor/github.com/containers/storage/drivers/template.go +++ b/vendor/github.com/containers/storage/drivers/template.go @@ -34,6 +34,7 @@ func NaiveCreateFromTemplate(d TemplateDriver, id, template string, templateIDMa } return err } + defer diff.Close() applyOptions := ApplyDiffOpts{ Diff: diff, diff --git a/vendor/github.com/containers/storage/drivers/vfs/driver.go b/vendor/github.com/containers/storage/drivers/vfs/driver.go index bf0cfe940..599cf095d 100644 --- a/vendor/github.com/containers/storage/drivers/vfs/driver.go +++ b/vendor/github.com/containers/storage/drivers/vfs/driver.go @@ -14,19 +14,13 @@ import ( "github.com/containers/storage/pkg/directory" "github.com/containers/storage/pkg/idtools" "github.com/containers/storage/pkg/parsers" - "github.com/containers/storage/pkg/stringid" "github.com/containers/storage/pkg/system" "github.com/opencontainers/selinux/go-selinux/label" "github.com/sirupsen/logrus" "github.com/vbatts/tar-split/tar/storage" ) -var ( - // CopyDir defines the copy method to use. 
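makeBackingFsDev above now tolerates a leftover .tmp node from an interrupted earlier run: on EEXIST it unlinks the stale node and retries once before giving up, then renames into place. The same create-then-rename pattern in isolation, assuming only golang.org/x/sys/unix (the helper name is hypothetical):

package quota

import (
	"errors"
	"fmt"

	"golang.org/x/sys/unix"
)

// recreateBlockDev builds the node under a temporary name, clearing
// one stale leftover if needed, then renames it into place so readers
// never observe a half-created device node.
func recreateBlockDev(tmp, final string, dev uint64) error {
	if err := unix.Mknod(tmp, unix.S_IFBLK|0o600, int(dev)); err != nil {
		if !errors.Is(err, unix.EEXIST) {
			return fmt.Errorf("failed to mknod %s: %w", tmp, err)
		}
		_ = unix.Unlink(tmp) // a previous run died between mknod and rename
		if err := unix.Mknod(tmp, unix.S_IFBLK|0o600, int(dev)); err != nil {
			return fmt.Errorf("failed to mknod %s: %w", tmp, err)
		}
	}
	return unix.Rename(tmp, final)
}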
- CopyDir = dirCopy -) - -const defaultPerms = os.FileMode(0555) +const defaultPerms = os.FileMode(0o555) func init() { graphdriver.MustRegister("vfs", Init) @@ -42,11 +36,10 @@ func Init(home string, options graphdriver.Options) (graphdriver.Driver, error) } rootIDs := d.idMappings.RootPair() - if err := idtools.MkdirAllAndChown(home, 0700, rootIDs); err != nil { + if err := idtools.MkdirAllAndChown(filepath.Join(home, "dir"), 0o700, rootIDs); err != nil { return nil, err } for _, option := range options.DriverOptions { - key, val, err := parsers.ParseKeyValueOpt(option) if err != nil { return nil, err } @@ -69,6 +62,12 @@ func Init(home string, options graphdriver.Options) (graphdriver.Driver, error) return nil, fmt.Errorf("vfs driver does not support %s options", key) } } + // If --imagestore is provided, add the writable graphRoot + // to vfs's additional image stores, as is done for + // the `overlay` driver. + if options.ImageStore != "" { + d.homes = append(d.homes, options.ImageStore) + } d.updater = graphdriver.NewNaiveLayerIDMapUpdater(d) d.naiveDiff = graphdriver.NewNaiveDiffDriver(d, d.updater) @@ -161,7 +160,7 @@ func (d *Driver) create(id, parent string, opts *graphdriver.CreateOpts, ro bool dir := d.dir(id) rootIDs := idMappings.RootPair() - if err := idtools.MkdirAllAndChown(filepath.Dir(dir), 0700, rootIDs); err != nil { + if err := idtools.MkdirAllAndChown(filepath.Dir(dir), 0o700, rootIDs); err != nil { return err } @@ -173,7 +172,7 @@ func (d *Driver) create(id, parent string, opts *graphdriver.CreateOpts, ro bool rootPerms := defaultPerms if runtime.GOOS == "darwin" { - rootPerms = os.FileMode(0700) + rootPerms = os.FileMode(0o700) } if parent != "" { @@ -203,7 +202,6 @@ func (d *Driver) create(id, parent string, opts *graphdriver.CreateOpts, ro bool } return nil - } func (d *Driver) dir(id string) string { @@ -228,15 +226,12 @@ func (d *Driver) Remove(id string) error { // Get returns the directory for the given id. func (d *Driver) Get(id string, options graphdriver.MountOpts) (_ string, retErr error) { dir := d.dir(id) - switch len(options.Options) { - case 0: - case 1: - if options.Options[0] == "ro" { + + for _, opt := range options.Options { + if opt == "ro" { // ignore "ro" option - break + continue } - fallthrough - default: return "", fmt.Errorf("vfs driver does not support mount options") } if st, err := os.Stat(dir); err != nil { @@ -268,7 +263,7 @@ func (d *Driver) Exists(id string) bool { // List layers (not including additional image stores) func (d *Driver) ListLayers() ([]string, error) { - entries, err := os.ReadDir(d.homes[0]) + entries, err := os.ReadDir(filepath.Join(d.homes[0], "dir")) if err != nil { return nil, err } @@ -278,7 +273,7 @@ func (d *Driver) ListLayers() ([]string, error) { for _, entry := range entries { id := entry.Name() // Does it look like a datadir directory? - if !entry.IsDir() || stringid.ValidateID(id) != nil { + if !entry.IsDir() { continue } @@ -304,7 +299,15 @@ func (d *Driver) SupportsShifting() bool { // UpdateLayerIDMap updates ID mappings in a layer from matching the ones specified // by toContainer to those specified by toHost.
func (d *Driver) UpdateLayerIDMap(id string, toContainer, toHost *idtools.IDMappings, mountLabel string) error { - return d.updater.UpdateLayerIDMap(id, toContainer, toHost, mountLabel) + if err := d.updater.UpdateLayerIDMap(id, toContainer, toHost, mountLabel); err != nil { + return err + } + dir := d.dir(id) + rootIDs, err := toHost.ToHost(idtools.IDPair{UID: 0, GID: 0}) + if err != nil { + return err + } + return os.Chown(dir, rootIDs.UID, rootIDs.GID) } // Changes produces a list of changes between the specified layer diff --git a/vendor/github.com/containers/storage/drivers/windows/windows.go b/vendor/github.com/containers/storage/drivers/windows/windows.go index 66aa460cf..8c2dc18ae 100644 --- a/vendor/github.com/containers/storage/drivers/windows/windows.go +++ b/vendor/github.com/containers/storage/drivers/windows/windows.go @@ -64,8 +64,7 @@ func init() { } } -type checker struct { -} +type checker struct{} func (c *checker) IsMounted(path string) bool { return false @@ -102,7 +101,7 @@ func InitFilter(home string, options graphdriver.Options) (graphdriver.Driver, e return nil, fmt.Errorf("%s is on an ReFS volume - ReFS volumes are not supported", home) } - if err := idtools.MkdirAllAs(home, 0700, 0, 0); err != nil { + if err := idtools.MkdirAllAs(home, 0o700, 0, 0); err != nil { return nil, fmt.Errorf("windowsfilter failed to create '%s': %w", home, err) } @@ -885,7 +884,7 @@ func (d *Driver) resolveID(id string) (string, error) { // setID stores the layerId in disk. func (d *Driver) setID(id, altID string) error { - return os.WriteFile(filepath.Join(d.dir(id), "layerId"), []byte(altID), 0600) + return os.WriteFile(filepath.Join(d.dir(id), "layerId"), []byte(altID), 0o600) } // getLayerChain returns the layer chain information. @@ -915,7 +914,7 @@ func (d *Driver) setLayerChain(id string, chain []string) error { } jPath := filepath.Join(d.dir(id), "layerchain.json") - err = os.WriteFile(jPath, content, 0600) + err = os.WriteFile(jPath, content, 0o600) if err != nil { return fmt.Errorf("unable to write layerchain file - %s", err) } diff --git a/vendor/github.com/containers/storage/drivers/zfs/zfs.go b/vendor/github.com/containers/storage/drivers/zfs/zfs.go index aeef64103..e02289784 100644 --- a/vendor/github.com/containers/storage/drivers/zfs/zfs.go +++ b/vendor/github.com/containers/storage/drivers/zfs/zfs.go @@ -30,7 +30,7 @@ type zfsOptions struct { mountOptions string } -const defaultPerms = os.FileMode(0555) +const defaultPerms = os.FileMode(0o555) func init() { graphdriver.MustRegister("zfs", Init) @@ -57,7 +57,7 @@ func Init(base string, opt graphdriver.Options) (graphdriver.Driver, error) { return nil, fmt.Errorf("the 'zfs' command is not available: %w", graphdriver.ErrPrerequisites) } - file, err := unix.Open("/dev/zfs", unix.O_RDWR, 0600) + file, err := unix.Open("/dev/zfs", unix.O_RDWR, 0o600) if err != nil { logger.Debugf("cannot open /dev/zfs: %v", err) return nil, fmt.Errorf("could not open /dev/zfs: %v: %w", err, graphdriver.ErrPrerequisites) @@ -110,7 +110,7 @@ func Init(base string, opt graphdriver.Options) (graphdriver.Driver, error) { if err != nil { return nil, fmt.Errorf("failed to get root uid/gid: %w", err) } - if err := idtools.MkdirAllAs(base, 0700, rootUID, rootGID); err != nil { + if err := idtools.MkdirAllAs(base, 0o700, rootUID, rootGID); err != nil { return nil, fmt.Errorf("failed to create '%s': %w", base, err) } @@ -409,7 +409,6 @@ func (d *Driver) Remove(id string) error { // Get returns the mountpoint for the given id after creating the target 
directories if necessary. func (d *Driver) Get(id string, options graphdriver.MountOpts) (_ string, retErr error) { - mountpoint := d.mountPath(id) if count := d.ctr.Increment(mountpoint); count > 1 { return mountpoint, nil @@ -454,7 +453,7 @@ func (d *Driver) Get(id string, options graphdriver.MountOpts) (_ string, retErr return "", err } // Create the target directories if they don't exist - if err := idtools.MkdirAllAs(mountpoint, 0755, rootUID, rootGID); err != nil { + if err := idtools.MkdirAllAs(mountpoint, 0o755, rootUID, rootGID); err != nil { return "", err } @@ -506,7 +505,9 @@ func (d *Driver) Exists(id string) bool { return d.filesystemsCache[d.zfsPath(id)] } -// List layers (not including additional image stores) +// List layers (not including additional image stores). Our layers aren't all +// dependent on a single well-known dataset, so we can't reliably tell which +// datasets are ours and which ones just look like they could be ours. func (d *Driver) ListLayers() ([]string, error) { return nil, graphdriver.ErrNotSupported } diff --git a/vendor/github.com/containers/storage/images.go b/vendor/github.com/containers/storage/images.go index 9dd196e8b..d71eab08b 100644 --- a/vendor/github.com/containers/storage/images.go +++ b/vendor/github.com/containers/storage/images.go @@ -568,26 +568,28 @@ func (r *imageStore) Save() error { } r.lockfile.AssertLockedForWriting() rpath := r.imagespath() - if err := os.MkdirAll(filepath.Dir(rpath), 0700); err != nil { + if err := os.MkdirAll(filepath.Dir(rpath), 0o700); err != nil { return err } jdata, err := json.Marshal(&r.images) if err != nil { return err } - if err := ioutils.AtomicWriteFile(rpath, jdata, 0600); err != nil { - return err - } + // This must be done before we write the file, because the process could be terminated + // after the file is written but before the lock file is updated. lw, err := r.lockfile.RecordWrite() if err != nil { return err } r.lastWrite = lw + if err := ioutils.AtomicWriteFile(rpath, jdata, 0o600); err != nil { + return err + } return nil } func newImageStore(dir string) (rwImageStore, error) { - if err := os.MkdirAll(dir, 0700); err != nil { + if err := os.MkdirAll(dir, 0o700); err != nil { return nil, err } lockfile, err := lockfile.GetLockFile(filepath.Join(dir, "images.lock")) @@ -1015,11 +1017,11 @@ func (r *imageStore) setBigData(image *Image, key string, data []byte, newDigest if key == "" { return fmt.Errorf("can't set empty name for image big data item: %w", ErrInvalidBigDataName) } - err := os.MkdirAll(r.datadir(image.ID), 0700) + err := os.MkdirAll(r.datadir(image.ID), 0o700) if err != nil { return err } - err = ioutils.AtomicWriteFile(r.datapath(image.ID, key), data, 0600) + err = ioutils.AtomicWriteFile(r.datapath(image.ID, key), data, 0o600) if err == nil { save := false if image.BigDataSizes == nil { diff --git a/vendor/github.com/containers/storage/layers.go b/vendor/github.com/containers/storage/layers.go index 3f37405b0..e5835757f 100644 --- a/vendor/github.com/containers/storage/layers.go +++ b/vendor/github.com/containers/storage/layers.go @@ -314,9 +314,6 @@ type rwLayerStore interface { // Clean up unreferenced layers GarbageCollect() error - - // supportsShifting() returns true if the driver.Driver.SupportsShifting(). - supportsShifting() bool } type layerStore struct { @@ -657,7 +654,6 @@ func (r *layerStore) Layers() ([]Layer, error) { // Requires startWriting. 
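The imageStore Save hunk above encodes a crash-safety rule that the layer store adopts below as well: record the write on the lock file first, then write the data file. If the process dies between the two steps, other readers at worst see a changed lock record over unchanged data and reload needlessly, instead of seeing fresh data under a stale record and trusting their cache. A sketch of the rule, assuming the vendored pkg/lockfile and pkg/ioutils APIs used in the hunk; the helper itself is hypothetical:

package storagetools

import (
	"github.com/containers/storage/pkg/ioutils"
	"github.com/containers/storage/pkg/lockfile"
)

// saveWithRecord bumps the lock file's write record, then writes the
// data file; the caller keeps the returned value as its lastWrite.
func saveWithRecord(lf *lockfile.LockFile, path string, data []byte) (lockfile.LastWrite, error) {
	lw, err := lf.RecordWrite() // must precede the data write
	if err != nil {
		return lockfile.LastWrite{}, err
	}
	if err := ioutils.AtomicWriteFile(path, data, 0o600); err != nil {
		return lockfile.LastWrite{}, err
	}
	return lw, nil
}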
func (r *layerStore) GarbageCollect() error { layers, err := r.driver.ListLayers() - if err != nil { if errors.Is(err, drivers.ErrNotSupported) { return nil @@ -864,33 +860,35 @@ func (r *layerStore) loadMounts() error { return err } layerMounts := []layerMountPoint{} - if err = json.Unmarshal(data, &layerMounts); len(data) == 0 || err == nil { - // Clear all of our mount information. If another process - // unmounted something, it (along with its zero count) won't - // have been encoded into the version of mountpoints.json that - // we're loading, so our count could fall out of sync with it - // if we don't, and if we subsequently change something else, - // we'd pass that error along to other process that reloaded - // the data after we saved it. - for _, layer := range r.layers { - layer.MountPoint = "" - layer.MountCount = 0 - } - // All of the non-zero count values will have been encoded, so - // we reset the still-mounted ones based on the contents. - for _, mount := range layerMounts { - if mount.MountPoint != "" { - if layer, ok := r.lookup(mount.ID); ok { - mounts[mount.MountPoint] = layer - layer.MountPoint = mount.MountPoint - layer.MountCount = mount.MountCount - } + if len(data) != 0 { + if err := json.Unmarshal(data, &layerMounts); err != nil { + return err + } + } + // Clear all of our mount information. If another process + // unmounted something, it (along with its zero count) won't + // have been encoded into the version of mountpoints.json that + // we're loading, so our count could fall out of sync with it + // if we don't, and if we subsequently change something else, + // we'd pass that error along to other processes that reload + // the data after we saved it. + for _, layer := range r.layers { + layer.MountPoint = "" + layer.MountCount = 0 + } + // All of the non-zero count values will have been encoded, so + // we reset the still-mounted ones based on the contents. + for _, mount := range layerMounts { + if mount.MountPoint != "" { + if layer, ok := r.lookup(mount.ID); ok { + mounts[mount.MountPoint] = layer + layer.MountPoint = mount.MountPoint + layer.MountCount = mount.MountCount } } - err = nil } r.bymount = mounts - return err + return nil } // save saves the contents of the store to disk. @@ -920,13 +918,21 @@ func (r *layerStore) saveLayers(saveLocations layerLocations) error { } r.lockfile.AssertLockedForWriting() + // This must be done before we write the file, because the process could be terminated + // after the file is written but before the lock file is updated.
+ lw, err := r.lockfile.RecordWrite() + if err != nil { + return err + } + r.lastWrite = lw + for locationIndex := 0; locationIndex < numLayerLocationIndex; locationIndex++ { location := layerLocationFromIndex(locationIndex) if location&saveLocations == 0 { continue } rpath := r.jsonPath[locationIndex] - if err := os.MkdirAll(filepath.Dir(rpath), 0700); err != nil { + if err := os.MkdirAll(filepath.Dir(rpath), 0o700); err != nil { return err } subsetLayers := make([]*Layer, 0, len(r.layers)) @@ -944,16 +950,11 @@ func (r *layerStore) saveLayers(saveLocations layerLocations) error { if location == volatileLayerLocation { opts.NoSync = true } - if err := ioutils.AtomicWriteFileWithOpts(rpath, jldata, 0600, &opts); err != nil { + if err := ioutils.AtomicWriteFileWithOpts(rpath, jldata, 0o600, &opts); err != nil { return err } r.layerspathsModified[locationIndex] = opts.ModTime } - lw, err := r.lockfile.RecordWrite() - if err != nil { - return err - } - r.lastWrite = lw return nil } @@ -965,7 +966,7 @@ func (r *layerStore) saveMounts() error { } r.mountsLockfile.AssertLockedForWriting() mpath := r.mountspath() - if err := os.MkdirAll(filepath.Dir(mpath), 0700); err != nil { + if err := os.MkdirAll(filepath.Dir(mpath), 0o700); err != nil { return err } mounts := make([]layerMountPoint, 0, len(r.layers)) @@ -982,22 +983,26 @@ func (r *layerStore) saveMounts() error { if err != nil { return err } - if err = ioutils.AtomicWriteFile(mpath, jmdata, 0600); err != nil { - return err - } + + // This must be done before we write the file, because the process could be terminated + // after the file is written but before the lock file is updated. lw, err := r.mountsLockfile.RecordWrite() if err != nil { return err } r.mountsLastWrite = lw + + if err = ioutils.AtomicWriteFile(mpath, jmdata, 0o600); err != nil { + return err + } return r.loadMounts() } func (s *store) newLayerStore(rundir string, layerdir string, driver drivers.Driver, transient bool) (rwLayerStore, error) { - if err := os.MkdirAll(rundir, 0700); err != nil { + if err := os.MkdirAll(rundir, 0o700); err != nil { return nil, err } - if err := os.MkdirAll(layerdir, 0700); err != nil { + if err := os.MkdirAll(layerdir, 0o700); err != nil { return nil, err } // Note: While the containers.lock file is in rundir for transient stores @@ -1213,10 +1218,10 @@ func (r *layerStore) create(id string, parentLayer *Layer, names []string, mount if !r.lockfile.IsReadWrite() { return nil, -1, fmt.Errorf("not allowed to create new layers at %q: %w", r.layerdir, ErrStoreIsReadOnly) } - if err := os.MkdirAll(r.rundir, 0700); err != nil { + if err := os.MkdirAll(r.rundir, 0o700); err != nil { return nil, -1, err } - if err := os.MkdirAll(r.layerdir, 0700); err != nil { + if err := os.MkdirAll(r.layerdir, 0o700); err != nil { return nil, -1, err } if id == "" { @@ -1690,7 +1695,7 @@ func (r *layerStore) setBigData(layer *Layer, key string, data io.Reader) error if key == "" { return fmt.Errorf("can't set empty name for layer big data item: %w", ErrInvalidBigDataName) } - err := os.MkdirAll(r.datadir(layer.ID), 0700) + err := os.MkdirAll(r.datadir(layer.ID), 0o700) if err != nil { return err } @@ -1698,7 +1703,7 @@ func (r *layerStore) setBigData(layer *Layer, key string, data io.Reader) error // NewAtomicFileWriter doesn't overwrite/truncate the existing inode. // BigData() relies on this behaviour when opening the file for read // so that it is either accessing the old data or the new one. 
- writer, err := ioutils.NewAtomicFileWriter(r.datapath(layer.ID, key), 0600) + writer, err := ioutils.NewAtomicFileWriter(r.datapath(layer.ID, key), 0o600) if err != nil { return fmt.Errorf("opening bigdata file: %w", err) } @@ -1922,6 +1927,18 @@ func (r *layerStore) Wipe() error { return err } } + ids, err := r.driver.ListLayers() + if err != nil { + if !errors.Is(err, drivers.ErrNotSupported) { + return err + } + ids = nil + } + for _, id := range ids { + if err := r.driver.Remove(id); err != nil { + return err + } + } return nil } @@ -2198,7 +2215,7 @@ func (r *layerStore) applyDiffWithOptions(to string, layerOptions *LayerOptions, return -1, err } compression := archive.DetectCompression(header[:n]) - defragmented := io.MultiReader(bytes.NewBuffer(header[:n]), diff) + defragmented := io.MultiReader(bytes.NewReader(header[:n]), diff) // Decide if we need to compute digests var compressedDigest, uncompressedDigest digest.Digest // = "" @@ -2226,54 +2243,63 @@ func (r *layerStore) applyDiffWithOptions(to string, layerOptions *LayerOptions, defragmented = io.TeeReader(defragmented, compressedCounter) tsdata := bytes.Buffer{} - compressor, err := pgzip.NewWriterLevel(&tsdata, pgzip.BestSpeed) - if err != nil { - compressor = pgzip.NewWriter(&tsdata) - } - if err := compressor.SetConcurrency(1024*1024, 1); err != nil { // 1024*1024 is the hard-coded default; we're not changing that - logrus.Infof("setting compression concurrency threads to 1: %v; ignoring", err) - } - metadata := storage.NewJSONPacker(compressor) - uncompressed, err := archive.DecompressStream(defragmented) - if err != nil { - return -1, err - } - defer uncompressed.Close() uidLog := make(map[uint32]struct{}) gidLog := make(map[uint32]struct{}) - idLogger, err := tarlog.NewLogger(func(h *tar.Header) { - if !strings.HasPrefix(path.Base(h.Name), archive.WhiteoutPrefix) { - uidLog[uint32(h.Uid)] = struct{}{} - gidLog[uint32(h.Gid)] = struct{}{} + var uncompressedCounter *ioutils.WriteCounter + + size, err = func() (int64, error) { // A scope for defer + compressor, err := pgzip.NewWriterLevel(&tsdata, pgzip.BestSpeed) + if err != nil { + return -1, err } - }) - if err != nil { - return -1, err - } - defer idLogger.Close() - uncompressedCounter := ioutils.NewWriteCounter(idLogger) - uncompressedWriter := (io.Writer)(uncompressedCounter) - if uncompressedDigester != nil { - uncompressedWriter = io.MultiWriter(uncompressedWriter, uncompressedDigester.Hash()) - } - payload, err := asm.NewInputTarStream(io.TeeReader(uncompressed, uncompressedWriter), metadata, storage.NewDiscardFilePutter()) - if err != nil { - return -1, err - } - options := drivers.ApplyDiffOpts{ - Diff: payload, - Mappings: r.layerMappings(layer), - MountLabel: layer.MountLabel, - } - size, err = r.driver.ApplyDiff(layer.ID, layer.Parent, options) + defer compressor.Close() // This must happen before tsdata is consumed. 
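Swapping bytes.NewBuffer for bytes.NewReader when stitching the sniffed header back onto the stream is the right call: the prefix is only ever read, so the Buffer's write machinery buys nothing. A self-contained sketch of the peek-then-reassemble pattern:

package main

import (
	"bytes"
	"fmt"
	"io"
	"strings"
)

// peek reads up to n bytes, then returns a reader that replays them
// ahead of the remaining stream, as if nothing had been consumed.
func peek(r io.Reader, n int) ([]byte, io.Reader, error) {
	header := make([]byte, n)
	m, err := io.ReadFull(r, header)
	if err != nil && err != io.ErrUnexpectedEOF && err != io.EOF {
		return nil, nil, err
	}
	// bytes.NewReader is the better fit here: the prefix is read-only.
	return header[:m], io.MultiReader(bytes.NewReader(header[:m]), r), nil
}

func main() {
	src := strings.NewReader("\x1f\x8bpayload")
	header, rest, err := peek(src, 2)
	if err != nil {
		panic(err)
	}
	fmt.Printf("magic: %x\n", header) // 1f8b => gzip
	all, _ := io.ReadAll(rest)
	fmt.Printf("full stream: %q\n", all)
}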
+ if err := compressor.SetConcurrency(1024*1024, 1); err != nil { // 1024*1024 is the hard-coded default; we're not changing that + logrus.Infof("setting compression concurrency threads to 1: %v; ignoring", err) + } + metadata := storage.NewJSONPacker(compressor) + uncompressed, err := archive.DecompressStream(defragmented) + if err != nil { + return -1, err + } + defer uncompressed.Close() + idLogger, err := tarlog.NewLogger(func(h *tar.Header) { + if !strings.HasPrefix(path.Base(h.Name), archive.WhiteoutPrefix) { + uidLog[uint32(h.Uid)] = struct{}{} + gidLog[uint32(h.Gid)] = struct{}{} + } + }) + if err != nil { + return -1, err + } + defer idLogger.Close() // This must happen before uidLog and gidLog is consumed. + uncompressedCounter = ioutils.NewWriteCounter(idLogger) + uncompressedWriter := (io.Writer)(uncompressedCounter) + if uncompressedDigester != nil { + uncompressedWriter = io.MultiWriter(uncompressedWriter, uncompressedDigester.Hash()) + } + payload, err := asm.NewInputTarStream(io.TeeReader(uncompressed, uncompressedWriter), metadata, storage.NewDiscardFilePutter()) + if err != nil { + return -1, err + } + options := drivers.ApplyDiffOpts{ + Diff: payload, + Mappings: r.layerMappings(layer), + MountLabel: layer.MountLabel, + } + size, err := r.driver.ApplyDiff(layer.ID, layer.Parent, options) + if err != nil { + return -1, err + } + return size, err + }() if err != nil { return -1, err } - compressor.Close() - if err := os.MkdirAll(filepath.Dir(r.tspath(layer.ID)), 0700); err != nil { + + if err := os.MkdirAll(filepath.Dir(r.tspath(layer.ID)), 0o700); err != nil { return -1, err } - if err := ioutils.AtomicWriteFile(r.tspath(layer.ID), tsdata.Bytes(), 0600); err != nil { + if err := ioutils.AtomicWriteFile(r.tspath(layer.ID), tsdata.Bytes(), 0o600); err != nil { return -1, err } if compressedDigester != nil { @@ -2366,8 +2392,26 @@ func (r *layerStore) ApplyDiffFromStagingDirectory(id, stagingDirectory string, layer.UncompressedDigest = diffOutput.UncompressedDigest layer.UncompressedSize = diffOutput.Size layer.Metadata = diffOutput.Metadata - if err = r.saveFor(layer); err != nil { - return err + if len(diffOutput.TarSplit) != 0 { + tsdata := bytes.Buffer{} + compressor, err := pgzip.NewWriterLevel(&tsdata, pgzip.BestSpeed) + if err != nil { + compressor = pgzip.NewWriter(&tsdata) + } + if err := compressor.SetConcurrency(1024*1024, 1); err != nil { // 1024*1024 is the hard-coded default; we're not changing that + logrus.Infof("setting compression concurrency threads to 1: %v; ignoring", err) + } + if _, err := compressor.Write(diffOutput.TarSplit); err != nil { + compressor.Close() + return err + } + compressor.Close() + if err := os.MkdirAll(filepath.Dir(r.tspath(layer.ID)), 0o700); err != nil { + return err + } + if err := ioutils.AtomicWriteFile(r.tspath(layer.ID), tsdata.Bytes(), 0o600); err != nil { + return err + } } for k, v := range diffOutput.BigData { if err := r.SetBigData(id, k, bytes.NewReader(v)); err != nil { @@ -2377,6 +2421,9 @@ func (r *layerStore) ApplyDiffFromStagingDirectory(id, stagingDirectory string, return err } } + if err = r.saveFor(layer); err != nil { + return err + } return err } @@ -2443,10 +2490,6 @@ func (r *layerStore) LayersByUncompressedDigest(d digest.Digest) ([]Layer, error return r.layersByDigestMap(r.byuncompressedsum, d) } -func (r *layerStore) supportsShifting() bool { - return r.driver.SupportsShifting() -} - func closeAll(closes ...func() error) (rErr error) { for _, f := range closes { if err := f(); err != nil { diff --git 
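Wrapping the body in an immediately invoked closure gives the defers a scope that ends before tsdata is consumed, so compressor.Close() has already flushed its final frame into the buffer by the time the buffer's bytes are written out. The idiom in miniature, with stdlib gzip standing in for pgzip:

package main

import (
	"bytes"
	"compress/gzip"
	"fmt"
	"io"
	"strings"
)

func main() {
	var buf bytes.Buffer

	// A scope for defer: the closure returns before buf is read below,
	// so the deferred Close has already flushed the final gzip frame.
	n, err := func() (int64, error) {
		zw := gzip.NewWriter(&buf)
		defer zw.Close() // must run before buf is consumed
		return io.Copy(zw, strings.NewReader("hello, layer diff"))
	}()
	if err != nil {
		panic(err)
	}

	zr, err := gzip.NewReader(&buf)
	if err != nil {
		panic(err)
	}
	out, _ := io.ReadAll(zr)
	fmt.Println(n, string(out))
}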
a/vendor/github.com/containers/storage/pkg/archive/archive.go b/vendor/github.com/containers/storage/pkg/archive/archive.go index 6209205b3..29f800b2a 100644 --- a/vendor/github.com/containers/storage/pkg/archive/archive.go +++ b/vendor/github.com/containers/storage/pkg/archive/archive.go @@ -131,16 +131,6 @@ const ( OverlayWhiteoutFormat ) -const ( - modeISDIR = 040000 // Directory - modeISFIFO = 010000 // FIFO - modeISREG = 0100000 // Regular file - modeISLNK = 0120000 // Symbolic link - modeISBLK = 060000 // Block special file - modeISCHR = 020000 // Character special file - modeISSOCK = 0140000 // Socket -) - // IsArchivePath checks if the (possibly compressed) file at the given path // starts with a tar file header. func IsArchivePath(path string) bool { @@ -328,7 +318,6 @@ func ReplaceFileTarWrapper(inputTarStream io.ReadCloser, mods map[string]TarModi } pipeWriter.Close() - }() return pipeReader } @@ -359,7 +348,7 @@ func FileInfoHeader(name string, fi os.FileInfo, link string) (*tar.Header, erro if err != nil { return nil, err } - hdr.Mode = fillGo18FileTypeBits(int64(chmodTarEntry(os.FileMode(hdr.Mode))), fi) + hdr.Mode = int64(chmodTarEntry(os.FileMode(hdr.Mode))) name, err = canonicalTarName(name, fi.IsDir()) if err != nil { return nil, fmt.Errorf("tar: cannot canonicalize path: %w", err) @@ -371,31 +360,6 @@ func FileInfoHeader(name string, fi os.FileInfo, link string) (*tar.Header, erro return hdr, nil } -// fillGo18FileTypeBits fills type bits which have been removed on Go 1.9 archive/tar -// https://github.com/golang/go/commit/66b5a2f -func fillGo18FileTypeBits(mode int64, fi os.FileInfo) int64 { - fm := fi.Mode() - switch { - case fm.IsRegular(): - mode |= modeISREG - case fi.IsDir(): - mode |= modeISDIR - case fm&os.ModeSymlink != 0: - mode |= modeISLNK - case fm&os.ModeDevice != 0: - if fm&os.ModeCharDevice != 0 { - mode |= modeISCHR - } else { - mode |= modeISBLK - } - case fm&os.ModeNamedPipe != 0: - mode |= modeISFIFO - case fm&os.ModeSocket != 0: - mode |= modeISSOCK - } - return mode -} - // ReadSecurityXattrToTarHeader reads security.capability, security,image // xattrs from filesystem to a tar header func ReadSecurityXattrToTarHeader(path string, hdr *tar.Header) error { @@ -552,9 +516,9 @@ func (ta *tarAppender) addTarFile(path, name string) error { } } - //handle re-mapping container ID mappings back to host ID mappings before - //writing tar headers/files. We skip whiteout files because they were written - //by the kernel and already have proper ownership relative to the host + // handle re-mapping container ID mappings back to host ID mappings before + // writing tar headers/files. 
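fillGo18FileTypeBits can go because modern archive/tar carries the file type in hdr.Typeflag rather than in S_IF* bits packed into hdr.Mode (the bits Go 1.9 stopped emitting, per the golang commit cited in the deleted comment). A quick demonstration of where the type information lives now:

package main

import (
	"archive/tar"
	"fmt"
	"os"
)

func main() {
	fi, err := os.Stat(".")
	if err != nil {
		panic(err)
	}
	// Since Go 1.9, archive/tar no longer packs S_IFDIR/S_IFREG-style
	// type bits into hdr.Mode; the file type travels in hdr.Typeflag.
	hdr, err := tar.FileInfoHeader(fi, "")
	if err != nil {
		panic(err)
	}
	fmt.Printf("mode: %o typeflag: %c\n", hdr.Mode, hdr.Typeflag) // e.g. 755 5
}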
We skip whiteout files because they were written + // by the kernel and already have proper ownership relative to the host if !strings.HasPrefix(filepath.Base(hdr.Name), WhiteoutPrefix) && !ta.IDMappings.Empty() { fileIDPair, err := getFileUIDGID(fi.Sys()) if err != nil { @@ -702,7 +666,7 @@ func createTarFile(path, extractDir string, hdr *tar.Header, reader io.Reader, L } if forceMask != nil && (hdr.Typeflag != tar.TypeSymlink || runtime.GOOS == "darwin") { - value := fmt.Sprintf("%d:%d:0%o", hdr.Uid, hdr.Gid, hdrInfo.Mode()&07777) + value := fmt.Sprintf("%d:%d:0%o", hdr.Uid, hdr.Gid, hdrInfo.Mode()&0o7777) if err := system.Lsetxattr(path, idtools.ContainersOverrideXattr, []byte(value), 0); err != nil { return err } @@ -800,7 +764,6 @@ func Tar(path string, compression Compression) (io.ReadCloser, error) { // TarWithOptions creates an archive from the directory at `path`, only including files whose relative // paths are included in `options.IncludeFiles` (if non-nil) or not in `options.ExcludePatterns`. func TarWithOptions(srcPath string, options *TarOptions) (io.ReadCloser, error) { - // Fix the source path to work with long path names. This is a no-op // on platforms other than Windows. srcPath = fixVolumePathPrefix(srcPath) @@ -1032,7 +995,7 @@ loop: parent := filepath.Dir(hdr.Name) parentPath := filepath.Join(dest, parent) if _, err := os.Lstat(parentPath); err != nil && os.IsNotExist(err) { - err = idtools.MkdirAllAndChownNew(parentPath, 0777, rootIDs) + err = idtools.MkdirAllAndChownNew(parentPath, 0o777, rootIDs) if err != nil { return err } @@ -1239,7 +1202,7 @@ func (archiver *Archiver) CopyWithTar(src, dst string) error { } // Create dst, copy src's content into it logrus.Debugf("Creating dest directory: %s", dst) - if err := idtools.MkdirAllAndChownNew(dst, 0755, rootIDs); err != nil { + if err := idtools.MkdirAllAndChownNew(dst, 0o755, rootIDs); err != nil { return err } logrus.Debugf("Calling TarUntar(%s, %s)", src, dst) @@ -1266,7 +1229,7 @@ func (archiver *Archiver) CopyFileWithTar(src, dst string) (err error) { dst = filepath.Join(dst, filepath.Base(src)) } // Create the holding directory if necessary - if err := os.MkdirAll(filepath.Dir(dst), 0700); err != nil { + if err := os.MkdirAll(filepath.Dir(dst), 0o700); err != nil { return err } @@ -1422,7 +1385,7 @@ func IsArchive(header []byte) bool { if compression != Uncompressed { return true } - r := tar.NewReader(bytes.NewBuffer(header)) + r := tar.NewReader(bytes.NewReader(header)) _, err := r.Next() return err == nil } diff --git a/vendor/github.com/containers/storage/pkg/archive/archive_linux.go b/vendor/github.com/containers/storage/pkg/archive/archive_linux.go index 775bd0766..02995d767 100644 --- a/vendor/github.com/containers/storage/pkg/archive/archive_linux.go +++ b/vendor/github.com/containers/storage/pkg/archive/archive_linux.go @@ -153,8 +153,7 @@ func (overlayWhiteoutConverter) ConvertReadWithHandler(hdr *tar.Header, path str return true, nil } -type directHandler struct { -} +type directHandler struct{} func (d directHandler) Setxattr(path, name string, value []byte) error { return unix.Setxattr(path, name, value, 0) @@ -185,7 +184,7 @@ func GetFileOwner(path string) (uint32, uint32, uint32, error) { } s, ok := f.Sys().(*syscall.Stat_t) if ok { - return s.Uid, s.Gid, s.Mode & 07777, nil + return s.Uid, s.Gid, s.Mode & 0o7777, nil } return 0, 0, uint32(f.Mode()), nil } diff --git a/vendor/github.com/containers/storage/pkg/archive/archive_unix.go 
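The override xattr encodes ownership and permissions as a uid:gid:octal-mode string, with the mode masked to 0o7777 so only the permission, setuid, setgid, and sticky bits survive. A sketch of the value formatting, using the raw numeric tar-header mode for clarity; the xattr name shown is what idtools.ContainersOverrideXattr is believed to expand to, so treat it as an assumption:

package main

import "fmt"

const containersOverrideXattr = "user.containers.override_stat" // assumed value

// overrideValue renders uid, gid and mode the way the storage code does:
// "%d:%d:0%o", with mode masked to permission+setuid/setgid/sticky bits.
// mode here is the raw tar-header mode, where 0o4000/0o2000/0o1000 are
// setuid, setgid and sticky.
func overrideValue(uid, gid int, mode int64) string {
	return fmt.Sprintf("%d:%d:0%o", uid, gid, mode&0o7777)
}

func main() {
	fmt.Println(containersOverrideXattr, "=", overrideValue(1000, 1000, 0o4755))
	// user.containers.override_stat = 1000:1000:04755
}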
b/vendor/github.com/containers/storage/pkg/archive/archive_unix.go index f8a34c831..88192f220 100644 --- a/vendor/github.com/containers/storage/pkg/archive/archive_unix.go +++ b/vendor/github.com/containers/storage/pkg/archive/archive_unix.go @@ -88,7 +88,7 @@ func minor(device uint64) uint64 { // handleTarTypeBlockCharFifo is an OS-specific helper function used by // createTarFile to handle the following types of header: Block; Char; Fifo func handleTarTypeBlockCharFifo(hdr *tar.Header, path string) error { - mode := uint32(hdr.Mode & 07777) + mode := uint32(hdr.Mode & 0o7777) switch hdr.Typeflag { case tar.TypeBlock: mode |= unix.S_IFBLK diff --git a/vendor/github.com/containers/storage/pkg/archive/archive_windows.go b/vendor/github.com/containers/storage/pkg/archive/archive_windows.go index e44011775..85a5b3a5d 100644 --- a/vendor/github.com/containers/storage/pkg/archive/archive_windows.go +++ b/vendor/github.com/containers/storage/pkg/archive/archive_windows.go @@ -38,18 +38,17 @@ func CanonicalTarNameForPath(p string) (string, error) { return "", fmt.Errorf("windows path contains forward slash: %s", p) } return strings.Replace(p, string(os.PathSeparator), "/", -1), nil - } // chmodTarEntry is used to adjust the file permissions used in tar header based // on the platform the archival is done. func chmodTarEntry(perm os.FileMode) os.FileMode { - //perm &= 0755 // this 0-ed out tar flags (like link, regular file, directory marker etc.) + // perm &= 0755 // this 0-ed out tar flags (like link, regular file, directory marker etc.) permPart := perm & os.ModePerm noPermPart := perm &^ os.ModePerm // Add the x bit: make everything +x from windows - permPart |= 0111 - permPart &= 0755 + permPart |= 0o111 + permPart &= 0o755 return noPermPart | permPart } diff --git a/vendor/github.com/containers/storage/pkg/archive/changes.go b/vendor/github.com/containers/storage/pkg/archive/changes.go index fc705484e..01c6f30c2 100644 --- a/vendor/github.com/containers/storage/pkg/archive/changes.go +++ b/vendor/github.com/containers/storage/pkg/archive/changes.go @@ -131,9 +131,11 @@ func isENOTDIR(err error) bool { return false } -type skipChange func(string) (bool, error) -type deleteChange func(string, string, os.FileInfo) (string, error) -type whiteoutChange func(string, string) (bool, error) +type ( + skipChange func(string) (bool, error) + deleteChange func(string, string, os.FileInfo) (string, error) + whiteoutChange func(string, string) (bool, error) +) func changes(layers []string, rw string, dc deleteChange, sc skipChange, wc whiteoutChange) ([]Change, error) { var ( @@ -299,7 +301,6 @@ func (info *FileInfo) path() string { } func (info *FileInfo) addChanges(oldInfo *FileInfo, changes *[]Change) { - sizeAtEntry := len(*changes) if oldInfo == nil { @@ -373,7 +374,6 @@ func (info *FileInfo) addChanges(oldInfo *FileInfo, changes *[]Change) { copy((*changes)[sizeAtEntry+1:], (*changes)[sizeAtEntry:]) (*changes)[sizeAtEntry] = change } - } // Changes add changes to file information. @@ -398,9 +398,7 @@ func newRootFileInfo(idMappings *idtools.IDMappings) *FileInfo { // ChangesDirs compares two directories and generates an array of Change objects describing the changes. // If oldDir is "", then all files in newDir will be Add-Changes. 
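chmodTarEntry on Windows normalizes the meaningless on-disk permissions before archiving: force the execute bits on, clamp to 0o755, and keep any non-permission flag bits intact. The function is small enough to reproduce verbatim in a runnable form:

package main

import (
	"fmt"
	"os"
)

// chmodTarEntry mirrors the Windows normalization: keep non-permission
// bits (directory, symlink markers, ...), force +x, clamp to 0o755.
func chmodTarEntry(perm os.FileMode) os.FileMode {
	permPart := perm & os.ModePerm
	noPermPart := perm &^ os.ModePerm
	permPart |= 0o111
	permPart &= 0o755
	return noPermPart | permPart
}

func main() {
	fmt.Printf("%o\n", chmodTarEntry(0o666))          // 755
	fmt.Println(chmodTarEntry(os.ModeDir | 0o444))    // dr-xr-xr-x
}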
func ChangesDirs(newDir string, newMappings *idtools.IDMappings, oldDir string, oldMappings *idtools.IDMappings) ([]Change, error) { - var ( - oldRoot, newRoot *FileInfo - ) + var oldRoot, newRoot *FileInfo if oldDir == "" { emptyDir, err := os.MkdirTemp("", "empty") if err != nil { diff --git a/vendor/github.com/containers/storage/pkg/archive/changes_linux.go b/vendor/github.com/containers/storage/pkg/archive/changes_linux.go index 77d3d6f51..f8414717b 100644 --- a/vendor/github.com/containers/storage/pkg/archive/changes_linux.go +++ b/vendor/github.com/containers/storage/pkg/archive/changes_linux.go @@ -397,5 +397,4 @@ func overlayDeletedFile(layers []string, root, path string, fi os.FileInfo) (str // We didn't find the same path in any older layers, so it was new in this one. return "", nil - } diff --git a/vendor/github.com/containers/storage/pkg/archive/changes_windows.go b/vendor/github.com/containers/storage/pkg/archive/changes_windows.go index 966400e59..1bab94fa5 100644 --- a/vendor/github.com/containers/storage/pkg/archive/changes_windows.go +++ b/vendor/github.com/containers/storage/pkg/archive/changes_windows.go @@ -7,7 +7,6 @@ import ( ) func statDifferent(oldStat *system.StatT, oldInfo *FileInfo, newStat *system.StatT, newInfo *FileInfo) bool { - // Don't look at size for dirs, its not a good measure of change if oldStat.Mtim() != newStat.Mtim() || oldStat.Mode() != newStat.Mode() || diff --git a/vendor/github.com/containers/storage/pkg/archive/copy.go b/vendor/github.com/containers/storage/pkg/archive/copy.go index 2c714e8da..55f753bf4 100644 --- a/vendor/github.com/containers/storage/pkg/archive/copy.go +++ b/vendor/github.com/containers/storage/pkg/archive/copy.go @@ -297,7 +297,6 @@ func PrepareArchiveCopy(srcContent io.Reader, srcInfo, dstInfo CopyInfo) (dstDir } return dstDir, RebaseArchiveEntries(srcContent, srcBase, dstBase), nil } - } // RebaseArchiveEntries rewrites the given srcContent archive replacing diff --git a/vendor/github.com/containers/storage/pkg/archive/diff.go b/vendor/github.com/containers/storage/pkg/archive/diff.go index 8fec5af38..713551859 100644 --- a/vendor/github.com/containers/storage/pkg/archive/diff.go +++ b/vendor/github.com/containers/storage/pkg/archive/diff.go @@ -85,7 +85,7 @@ func UnpackLayer(dest string, layer io.Reader, options *TarOptions) (size int64, parentPath := filepath.Join(dest, parent) if _, err := os.Lstat(parentPath); err != nil && os.IsNotExist(err) { - err = os.MkdirAll(parentPath, 0755) + err = os.MkdirAll(parentPath, 0o755) if err != nil { return 0, err } diff --git a/vendor/github.com/containers/storage/pkg/archive/fflags_bsd.go b/vendor/github.com/containers/storage/pkg/archive/fflags_bsd.go index 14661c411..92b8d05ed 100644 --- a/vendor/github.com/containers/storage/pkg/archive/fflags_bsd.go +++ b/vendor/github.com/containers/storage/pkg/archive/fflags_bsd.go @@ -98,7 +98,7 @@ func parseFileFlags(fflags string) (uint32, uint32, error) { } func formatFileFlags(fflags uint32) (string, error) { - var res = []string{} + res := []string{} for fflags != 0 { // Extract lowest set bit fflag := uint32(1) << bits.TrailingZeros32(fflags) diff --git a/vendor/github.com/containers/storage/pkg/chrootarchive/archive.go b/vendor/github.com/containers/storage/pkg/chrootarchive/archive.go index 2de95f39a..f221a2283 100644 --- a/vendor/github.com/containers/storage/pkg/chrootarchive/archive.go +++ b/vendor/github.com/containers/storage/pkg/chrootarchive/archive.go @@ -77,7 +77,7 @@ func untarHandler(tarArchive io.Reader, dest string, 
options *archive.TarOptions dest = filepath.Clean(dest) if _, err := os.Stat(dest); os.IsNotExist(err) { - if err := idtools.MkdirAllAndChownNew(dest, 0755, rootIDs); err != nil { + if err := idtools.MkdirAllAndChownNew(dest, 0o755, rootIDs); err != nil { return err } } diff --git a/vendor/github.com/containers/storage/pkg/chrootarchive/archive_darwin.go b/vendor/github.com/containers/storage/pkg/chrootarchive/archive_darwin.go index 42ee39f48..f7a16e9f9 100644 --- a/vendor/github.com/containers/storage/pkg/chrootarchive/archive_darwin.go +++ b/vendor/github.com/containers/storage/pkg/chrootarchive/archive_darwin.go @@ -8,7 +8,8 @@ import ( func invokeUnpack(decompressedArchive io.Reader, dest string, - options *archive.TarOptions, root string) error { + options *archive.TarOptions, root string, +) error { return archive.Unpack(decompressedArchive, dest, options) } diff --git a/vendor/github.com/containers/storage/pkg/chrootarchive/archive_unix.go b/vendor/github.com/containers/storage/pkg/chrootarchive/archive_unix.go index 8cc0f33b3..259f8c99a 100644 --- a/vendor/github.com/containers/storage/pkg/chrootarchive/archive_unix.go +++ b/vendor/github.com/containers/storage/pkg/chrootarchive/archive_unix.go @@ -27,7 +27,7 @@ func untar() { var options archive.TarOptions - //read the options from the pipe "ExtraFiles" + // read the options from the pipe "ExtraFiles" if err := json.NewDecoder(os.NewFile(3, "options")).Decode(&options); err != nil { fatal(err) } @@ -99,7 +99,7 @@ func invokeUnpack(decompressedArchive io.Reader, dest string, options *archive.T return fmt.Errorf("untar error on re-exec cmd: %w", err) } - //write the options to the pipe for the untar exec to read + // write the options to the pipe for the untar exec to read if err := json.NewEncoder(w).Encode(options); err != nil { w.Close() return fmt.Errorf("untar json encode to pipe failed: %w", err) diff --git a/vendor/github.com/containers/storage/pkg/chrootarchive/archive_windows.go b/vendor/github.com/containers/storage/pkg/chrootarchive/archive_windows.go index 1395ff8cd..745502204 100644 --- a/vendor/github.com/containers/storage/pkg/chrootarchive/archive_windows.go +++ b/vendor/github.com/containers/storage/pkg/chrootarchive/archive_windows.go @@ -14,7 +14,8 @@ func chroot(path string) error { func invokeUnpack(decompressedArchive io.Reader, dest string, - options *archive.TarOptions, root string) error { + options *archive.TarOptions, root string, +) error { // Windows is different to Linux here because Windows does not support // chroot. Hence there is no point sandboxing a chrooted process to // do the unpack. We call inline instead within the daemon process. diff --git a/vendor/github.com/containers/storage/pkg/chrootarchive/diff_unix.go b/vendor/github.com/containers/storage/pkg/chrootarchive/diff_unix.go index 90f453913..71ed094d1 100644 --- a/vendor/github.com/containers/storage/pkg/chrootarchive/diff_unix.go +++ b/vendor/github.com/containers/storage/pkg/chrootarchive/diff_unix.go @@ -26,7 +26,6 @@ type applyLayerResponse struct { // used on Windows as it does not support chroot, hence no point sandboxing // through chroot and rexec. 
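The re-exec'd untar helper receives its archive.TarOptions as JSON over file descriptor 3, which the parent wires up via cmd.ExtraFiles. The sketch below keeps both ends in one process with an os.Pipe so it stays runnable; in the real code the reading end is os.NewFile(3, "options") in the child:

package main

import (
	"encoding/json"
	"fmt"
	"os"
)

type tarOptions struct {
	ExcludePatterns []string
	NoLchown        bool
}

func main() {
	// Parent and child ends of the "ExtraFiles" pipe, in one process
	// for the sake of the demo.
	r, w, err := os.Pipe()
	if err != nil {
		panic(err)
	}

	go func() {
		defer w.Close()
		opts := tarOptions{ExcludePatterns: []string{".git"}, NoLchown: true}
		if err := json.NewEncoder(w).Encode(opts); err != nil {
			panic(err)
		}
	}()

	var got tarOptions
	if err := json.NewDecoder(r).Decode(&got); err != nil {
		panic(err)
	}
	fmt.Printf("%+v\n", got)
}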
func applyLayer() { - var ( tmpDir string err error diff --git a/vendor/github.com/containers/storage/pkg/chunked/cache_linux.go b/vendor/github.com/containers/storage/pkg/chunked/cache_linux.go index 7efd12373..cd13212e6 100644 --- a/vendor/github.com/containers/storage/pkg/chunked/cache_linux.go +++ b/vendor/github.com/containers/storage/pkg/chunked/cache_linux.go @@ -48,8 +48,10 @@ type layersCache struct { created time.Time } -var cacheMutex sync.Mutex -var cache *layersCache +var ( + cacheMutex sync.Mutex + cache *layersCache +) func (c *layersCache) release() { cacheMutex.Lock() @@ -514,14 +516,14 @@ func unmarshalToc(manifest []byte) (*internal.TOC, error) { iter := jsoniter.ParseBytes(jsoniter.ConfigFastest, manifest) for field := iter.ReadObject(); field != ""; field = iter.ReadObject() { - if field != "entries" { + if strings.ToLower(field) != "entries" { iter.Skip() continue } for iter.ReadArray() { for field := iter.ReadObject(); field != ""; field = iter.ReadObject() { - switch field { - case "type", "name", "linkName", "digest", "chunkDigest", "chunkType": + switch strings.ToLower(field) { + case "type", "name", "linkname", "digest", "chunkdigest", "chunktype", "modtime", "accesstime", "changetime": count += len(iter.ReadStringAsSlice()) case "xattrs": for key := iter.ReadObject(); key != ""; key = iter.ReadObject() { @@ -546,33 +548,33 @@ func unmarshalToc(manifest []byte) (*internal.TOC, error) { iter = jsoniter.ParseBytes(jsoniter.ConfigFastest, manifest) for field := iter.ReadObject(); field != ""; field = iter.ReadObject() { - if field == "version" { + if strings.ToLower(field) == "version" { toc.Version = iter.ReadInt() continue } - if field != "entries" { + if strings.ToLower(field) != "entries" { iter.Skip() continue } for iter.ReadArray() { var m internal.FileMetadata for field := iter.ReadObject(); field != ""; field = iter.ReadObject() { - switch field { + switch strings.ToLower(field) { case "type": m.Type = getString(iter.ReadStringAsSlice()) case "name": m.Name = getString(iter.ReadStringAsSlice()) - case "linkName": + case "linkname": m.Linkname = getString(iter.ReadStringAsSlice()) case "mode": m.Mode = iter.ReadInt64() case "size": m.Size = iter.ReadInt64() - case "UID": + case "uid": m.UID = iter.ReadInt() - case "GID": + case "gid": m.GID = iter.ReadInt() - case "ModTime": + case "modtime": time, err := time.Parse(time.RFC3339, byteSliceAsString(iter.ReadStringAsSlice())) if err != nil { return nil, err @@ -590,23 +592,23 @@ func unmarshalToc(manifest []byte) (*internal.TOC, error) { return nil, err } m.ChangeTime = &time - case "devMajor": + case "devmajor": m.Devmajor = iter.ReadInt64() - case "devMinor": + case "devminor": m.Devminor = iter.ReadInt64() case "digest": m.Digest = getString(iter.ReadStringAsSlice()) case "offset": m.Offset = iter.ReadInt64() - case "endOffset": + case "endoffset": m.EndOffset = iter.ReadInt64() - case "chunkSize": + case "chunksize": m.ChunkSize = iter.ReadInt64() - case "chunkOffset": + case "chunkoffset": m.ChunkOffset = iter.ReadInt64() - case "chunkDigest": + case "chunkdigest": m.ChunkDigest = getString(iter.ReadStringAsSlice()) - case "chunkType": + case "chunktype": m.ChunkType = getString(iter.ReadStringAsSlice()) case "xattrs": m.Xattrs = make(map[string]string) diff --git a/vendor/github.com/containers/storage/pkg/chunked/compression_linux.go b/vendor/github.com/containers/storage/pkg/chunked/compression_linux.go index 9333ed65c..2ee79dd23 100644 --- 
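Lowercasing every field name before the switch restores the case-insensitive matching of encoding/json that the handwritten jsoniter fast path had lost; previously only the exact spellings "linkName", "UID", "GID", and so on were accepted. For reference, the stdlib semantics the patch converges on:

package main

import (
	"encoding/json"
	"fmt"
)

type fileMetadata struct {
	Type     string `json:"type"`
	Linkname string `json:"linkName"`
	UID      int    `json:"uid"`
}

func main() {
	// encoding/json matches field names case-insensitively, so both
	// spellings decode into the same struct; the patched jsoniter loop
	// emulates this by lowercasing the key before the switch.
	for _, doc := range []string{
		`{"type":"reg","linkName":"a","uid":1}`,
		`{"TYPE":"reg","linkname":"a","UID":1}`,
	} {
		var m fileMetadata
		if err := json.Unmarshal([]byte(doc), &m); err != nil {
			panic(err)
		}
		fmt.Printf("%+v\n", m)
	}
}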
a/vendor/github.com/containers/storage/pkg/chunked/compression_linux.go +++ b/vendor/github.com/containers/storage/pkg/chunked/compression_linux.go @@ -150,22 +150,32 @@ func readEstargzChunkedManifest(blobStream ImageSourceSeekable, blobSize int64, // readZstdChunkedManifest reads the zstd:chunked manifest from the seekable stream blobStream. The blob total size must // be specified. // This function uses the io.github.containers.zstd-chunked. annotations when specified. -func readZstdChunkedManifest(ctx context.Context, blobStream ImageSourceSeekable, blobSize int64, annotations map[string]string) ([]byte, int64, error) { +func readZstdChunkedManifest(ctx context.Context, blobStream ImageSourceSeekable, blobSize int64, annotations map[string]string) ([]byte, []byte, int64, error) { footerSize := int64(internal.FooterSizeSupported) if blobSize <= footerSize { - return nil, 0, errors.New("blob too small") + return nil, nil, 0, errors.New("blob too small") } manifestChecksumAnnotation := annotations[internal.ManifestChecksumKey] if manifestChecksumAnnotation == "" { - return nil, 0, fmt.Errorf("manifest checksum annotation %q not found", internal.ManifestChecksumKey) + return nil, nil, 0, fmt.Errorf("manifest checksum annotation %q not found", internal.ManifestChecksumKey) } var offset, length, lengthUncompressed, manifestType uint64 + var offsetTarSplit, lengthTarSplit, lengthUncompressedTarSplit uint64 + tarSplitChecksumAnnotation := "" + if offsetMetadata := annotations[internal.ManifestInfoKey]; offsetMetadata != "" { if _, err := fmt.Sscanf(offsetMetadata, "%d:%d:%d:%d", &offset, &length, &lengthUncompressed, &manifestType); err != nil { - return nil, 0, err + return nil, nil, 0, err + } + + if tarSplitInfoKeyAnnotation, found := annotations[internal.TarSplitInfoKey]; found { + if _, err := fmt.Sscanf(tarSplitInfoKeyAnnotation, "%d:%d:%d", &offsetTarSplit, &lengthTarSplit, &lengthUncompressedTarSplit); err != nil { + return nil, nil, 0, err + } + tarSplitChecksumAnnotation = annotations[internal.TarSplitChecksumKey] } } else { chunk := ImageSourceChunk{ @@ -174,39 +184,39 @@ func readZstdChunkedManifest(ctx context.Context, blobStream ImageSourceSeekable } parts, errs, err := blobStream.GetBlobAt([]ImageSourceChunk{chunk}) if err != nil { - return nil, 0, err + return nil, nil, 0, err } var reader io.ReadCloser select { case r := <-parts: reader = r case err := <-errs: - return nil, 0, err + return nil, nil, 0, err } footer := make([]byte, footerSize) if _, err := io.ReadFull(reader, footer); err != nil { - return nil, 0, err + return nil, nil, 0, err } offset = binary.LittleEndian.Uint64(footer[0:8]) length = binary.LittleEndian.Uint64(footer[8:16]) lengthUncompressed = binary.LittleEndian.Uint64(footer[16:24]) manifestType = binary.LittleEndian.Uint64(footer[24:32]) - if !isZstdChunkedFrameMagic(footer[32:40]) { - return nil, 0, errors.New("invalid magic number") + if !isZstdChunkedFrameMagic(footer[48:56]) { + return nil, nil, 0, errors.New("invalid magic number") } } if manifestType != internal.ManifestTypeCRFS { - return nil, 0, errors.New("invalid manifest type") + return nil, nil, 0, errors.New("invalid manifest type") } // set a reasonable limit if length > (1<<20)*50 { - return nil, 0, errors.New("manifest too big") + return nil, nil, 0, errors.New("manifest too big") } if lengthUncompressed > (1<<20)*50 { - return nil, 0, errors.New("manifest too big") + return nil, nil, 0, errors.New("manifest too big") } chunk := ImageSourceChunk{ @@ -214,47 +224,86 @@ func 
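The footer grows from 40 to 56 bytes (FooterSizeSupported), and the magic check moves to the final eight bytes, footer[48:56]. A sketch of parsing the fields the patched reader actually consumes; the magic value here is a placeholder, not the real ZstdChunkedFrameMagic bytes:

package main

import (
	"bytes"
	"encoding/binary"
	"errors"
	"fmt"
)

const footerSize = 56 // FooterSizeSupported after this patch (was 40)

// magic is a stand-in; the real ZstdChunkedFrameMagic lives in the
// internal package and its bytes are not reproduced here.
var magic = []byte("ZSTCHNKD")

type footer struct {
	Offset, Length, LengthUncompressed, ManifestType uint64
}

func parseFooter(b []byte) (footer, error) {
	if len(b) != footerSize {
		return footer{}, errors.New("short footer")
	}
	// Field positions match the patched reader: four LE uint64s up
	// front, magic in the last eight bytes (footer[48:56]).
	if !bytes.Equal(b[48:56], magic) {
		return footer{}, errors.New("invalid magic number")
	}
	return footer{
		Offset:             binary.LittleEndian.Uint64(b[0:8]),
		Length:             binary.LittleEndian.Uint64(b[8:16]),
		LengthUncompressed: binary.LittleEndian.Uint64(b[16:24]),
		ManifestType:       binary.LittleEndian.Uint64(b[24:32]),
	}, nil
}

func main() {
	b := make([]byte, footerSize)
	binary.LittleEndian.PutUint64(b[0:8], 1024)
	binary.LittleEndian.PutUint64(b[8:16], 512)
	binary.LittleEndian.PutUint64(b[16:24], 2048)
	binary.LittleEndian.PutUint64(b[24:32], 1) // ManifestTypeCRFS
	copy(b[48:56], magic)
	f, err := parseFooter(b)
	if err != nil {
		panic(err)
	}
	fmt.Printf("%+v\n", f)
}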
readZstdChunkedManifest(ctx context.Context, blobStream ImageSourceSeekable Length: length, } - parts, errs, err := blobStream.GetBlobAt([]ImageSourceChunk{chunk}) + chunks := []ImageSourceChunk{chunk} + + if offsetTarSplit > 0 { + chunkTarSplit := ImageSourceChunk{ + Offset: offsetTarSplit, + Length: lengthTarSplit, + } + chunks = append(chunks, chunkTarSplit) + } + + parts, errs, err := blobStream.GetBlobAt(chunks) if err != nil { - return nil, 0, err + return nil, nil, 0, err } - var reader io.ReadCloser - select { - case r := <-parts: - reader = r - case err := <-errs: - return nil, 0, err + + readBlob := func(len uint64) ([]byte, error) { + var reader io.ReadCloser + select { + case r := <-parts: + reader = r + case err := <-errs: + return nil, err + } + + blob := make([]byte, len) + if _, err := io.ReadFull(reader, blob); err != nil { + reader.Close() + return nil, err + } + if err := reader.Close(); err != nil { + return nil, err + } + return blob, nil } - manifest := make([]byte, length) - if _, err := io.ReadFull(reader, manifest); err != nil { - return nil, 0, err + manifest, err := readBlob(length) + if err != nil { + return nil, nil, 0, err } - manifestDigester := digest.Canonical.Digester() - manifestChecksum := manifestDigester.Hash() - if _, err := manifestChecksum.Write(manifest); err != nil { - return nil, 0, err + decodedBlob, err := decodeAndValidateBlob(manifest, lengthUncompressed, manifestChecksumAnnotation) + if err != nil { + return nil, nil, 0, err + } + decodedTarSplit := []byte{} + if offsetTarSplit > 0 { + tarSplit, err := readBlob(lengthTarSplit) + if err != nil { + return nil, nil, 0, err + } + + decodedTarSplit, err = decodeAndValidateBlob(tarSplit, lengthUncompressedTarSplit, tarSplitChecksumAnnotation) + if err != nil { + return nil, nil, 0, err + } } + return decodedBlob, decodedTarSplit, int64(offset), err +} - d, err := digest.Parse(manifestChecksumAnnotation) +func decodeAndValidateBlob(blob []byte, lengthUncompressed uint64, expectedUncompressedChecksum string) ([]byte, error) { + d, err := digest.Parse(expectedUncompressedChecksum) if err != nil { - return nil, 0, err + return nil, err } - if manifestDigester.Digest() != d { - return nil, 0, errors.New("invalid manifest checksum") + + blobDigester := d.Algorithm().Digester() + blobChecksum := blobDigester.Hash() + if _, err := blobChecksum.Write(blob); err != nil { + return nil, err + } + if blobDigester.Digest() != d { + return nil, fmt.Errorf("invalid blob checksum, expected checksum %s, got %s", d, blobDigester.Digest()) } decoder, err := zstd.NewReader(nil) //nolint:contextcheck if err != nil { - return nil, 0, err + return nil, err } defer decoder.Close() b := make([]byte, 0, lengthUncompressed) - if decoded, err := decoder.DecodeAll(manifest, b); err == nil { - return decoded, int64(offset), nil - } - - return manifest, int64(offset), nil + return decoder.DecodeAll(blob, b) } diff --git a/vendor/github.com/containers/storage/pkg/chunked/compressor/compressor.go b/vendor/github.com/containers/storage/pkg/chunked/compressor/compressor.go index 2a9bdc675..ca7ce30f7 100644 --- a/vendor/github.com/containers/storage/pkg/chunked/compressor/compressor.go +++ b/vendor/github.com/containers/storage/pkg/chunked/compressor/compressor.go @@ -6,17 +6,23 @@ package compressor import ( "bufio" + "bytes" "encoding/base64" "io" "github.com/containers/storage/pkg/chunked/internal" "github.com/containers/storage/pkg/ioutils" + "github.com/klauspost/compress/zstd" "github.com/opencontainers/go-digest" 
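decodeAndValidateBlob hashes the blob with the algorithm named by the expected digest and refuses to decompress on a mismatch, so the manifest and the tar-split data are now validated the same way. The checksum step using only the stdlib (the real code goes through opencontainers/go-digest and then zstd-decompresses the validated blob):

package main

import (
	"crypto/sha256"
	"encoding/hex"
	"fmt"
)

// validate mirrors decodeAndValidateBlob's checksum step: hash the raw
// (still-compressed) blob and compare against the expected digest before
// doing anything else with it.
func validate(blob []byte, expected string) error {
	sum := sha256.Sum256(blob)
	if got := hex.EncodeToString(sum[:]); got != expected {
		return fmt.Errorf("invalid blob checksum, expected checksum %s, got %s", expected, got)
	}
	return nil
}

func main() {
	blob := []byte("manifest bytes")
	sum := sha256.Sum256(blob)
	if err := validate(blob, hex.EncodeToString(sum[:])); err != nil {
		panic(err)
	}
	fmt.Println("checksum ok")
}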
"github.com/vbatts/tar-split/archive/tar" + "github.com/vbatts/tar-split/tar/asm" + "github.com/vbatts/tar-split/tar/storage" ) -const RollsumBits = 16 -const holesThreshold = int64(1 << 10) +const ( + RollsumBits = 16 + holesThreshold = int64(1 << 10) +) type holesFinder struct { reader *bufio.Reader @@ -196,11 +202,55 @@ type chunk struct { ChunkType string } +type tarSplitData struct { + compressed *bytes.Buffer + digester digest.Digester + uncompressedCounter *ioutils.WriteCounter + zstd *zstd.Encoder + packer storage.Packer +} + +func newTarSplitData(level int) (*tarSplitData, error) { + compressed := bytes.NewBuffer(nil) + digester := digest.Canonical.Digester() + + zstdWriter, err := internal.ZstdWriterWithLevel(io.MultiWriter(compressed, digester.Hash()), level) + if err != nil { + return nil, err + } + + uncompressedCounter := ioutils.NewWriteCounter(zstdWriter) + metaPacker := storage.NewJSONPacker(uncompressedCounter) + + return &tarSplitData{ + compressed: compressed, + digester: digester, + uncompressedCounter: uncompressedCounter, + zstd: zstdWriter, + packer: metaPacker, + }, nil +} + func writeZstdChunkedStream(destFile io.Writer, outMetadata map[string]string, reader io.Reader, level int) error { // total written so far. Used to retrieve partial offsets in the file dest := ioutils.NewWriteCounter(destFile) - tr := tar.NewReader(reader) + tarSplitData, err := newTarSplitData(level) + if err != nil { + return err + } + defer func() { + if tarSplitData.zstd != nil { + tarSplitData.zstd.Close() + } + }() + + its, err := asm.NewInputTarStream(reader, tarSplitData.packer, nil) + if err != nil { + return err + } + + tr := tar.NewReader(its) tr.RawAccounting = true buf := make([]byte, 4096) @@ -212,7 +262,6 @@ func writeZstdChunkedStream(destFile io.Writer, outMetadata map[string]string, r defer func() { if zstdWriter != nil { zstdWriter.Close() - zstdWriter.Flush() } }() @@ -222,9 +271,6 @@ func writeZstdChunkedStream(destFile io.Writer, outMetadata map[string]string, r if err := zstdWriter.Close(); err != nil { return 0, err } - if err := zstdWriter.Flush(); err != nil { - return 0, err - } offset = dest.Count zstdWriter.Reset(dest) } @@ -371,9 +417,11 @@ func writeZstdChunkedStream(destFile io.Writer, outMetadata map[string]string, r rawBytes := tr.RawBytes() if _, err := zstdWriter.Write(rawBytes); err != nil { + zstdWriter.Close() return err } if err := zstdWriter.Flush(); err != nil { + zstdWriter.Close() return err } if err := zstdWriter.Close(); err != nil { @@ -381,7 +429,21 @@ func writeZstdChunkedStream(destFile io.Writer, outMetadata map[string]string, r } zstdWriter = nil - return internal.WriteZstdChunkedManifest(dest, outMetadata, uint64(dest.Count), metadata, level) + if err := tarSplitData.zstd.Flush(); err != nil { + return err + } + if err := tarSplitData.zstd.Close(); err != nil { + return err + } + tarSplitData.zstd = nil + + ts := internal.TarSplitData{ + Data: tarSplitData.compressed.Bytes(), + Digest: tarSplitData.digester.Digest(), + UncompressedSize: tarSplitData.uncompressedCounter.Count, + } + + return internal.WriteZstdChunkedManifest(dest, outMetadata, uint64(dest.Count), &ts, metadata, level) } type zstdChunkedWriter struct { diff --git a/vendor/github.com/containers/storage/pkg/chunked/compressor/rollsum.go b/vendor/github.com/containers/storage/pkg/chunked/compressor/rollsum.go index f4dfad822..59df6901e 100644 --- a/vendor/github.com/containers/storage/pkg/chunked/compressor/rollsum.go +++ 
b/vendor/github.com/containers/storage/pkg/chunked/compressor/rollsum.go @@ -25,11 +25,15 @@ import ( "math/bits" ) -const windowSize = 64 // Roll assumes windowSize is a power of 2 -const charOffset = 31 +const ( + windowSize = 64 // Roll assumes windowSize is a power of 2 + charOffset = 31 +) -const blobBits = 13 -const blobSize = 1 << blobBits // 8k +const ( + blobBits = 13 + blobSize = 1 << blobBits // 8k +) type RollSum struct { s1, s2 uint32 diff --git a/vendor/github.com/containers/storage/pkg/chunked/internal/compression.go b/vendor/github.com/containers/storage/pkg/chunked/internal/compression.go index 092b03533..49074eadf 100644 --- a/vendor/github.com/containers/storage/pkg/chunked/internal/compression.go +++ b/vendor/github.com/containers/storage/pkg/chunked/internal/compression.go @@ -90,6 +90,8 @@ func GetType(t byte) (string, error) { const ( ManifestChecksumKey = "io.github.containers.zstd-chunked.manifest-checksum" ManifestInfoKey = "io.github.containers.zstd-chunked.manifest-position" + TarSplitChecksumKey = "io.github.containers.zstd-chunked.tarsplit-checksum" + TarSplitInfoKey = "io.github.containers.zstd-chunked.tarsplit-position" // ManifestTypeCRFS is a manifest file compatible with the CRFS TOC file. ManifestTypeCRFS = 1 @@ -97,7 +99,7 @@ const ( // FooterSizeSupported is the footer size supported by this implementation. // Newer versions of the image format might increase this value, so reject // any version that is not supported. - FooterSizeSupported = 40 + FooterSizeSupported = 56 ) var ( @@ -125,16 +127,23 @@ func appendZstdSkippableFrame(dest io.Writer, data []byte) error { return nil } -func WriteZstdChunkedManifest(dest io.Writer, outMetadata map[string]string, offset uint64, metadata []FileMetadata, level int) error { +type TarSplitData struct { + Data []byte + Digest digest.Digest + UncompressedSize int64 +} + +func WriteZstdChunkedManifest(dest io.Writer, outMetadata map[string]string, offset uint64, tarSplitData *TarSplitData, metadata []FileMetadata, level int) error { // 8 is the size of the zstd skippable frame header + the frame size - manifestOffset := offset + 8 + const zstdSkippableFrameHeader = 8 + manifestOffset := offset + zstdSkippableFrameHeader toc := TOC{ Version: 1, Entries: metadata, } - var json = jsoniter.ConfigCompatibleWithStandardLibrary + json := jsoniter.ConfigCompatibleWithStandardLibrary // Generate the manifest manifest, err := json.Marshal(toc) if err != nil { @@ -167,13 +176,20 @@ func WriteZstdChunkedManifest(dest io.Writer, outMetadata map[string]string, off return err } + outMetadata[TarSplitChecksumKey] = tarSplitData.Digest.String() + tarSplitOffset := manifestOffset + uint64(len(compressedManifest)) + zstdSkippableFrameHeader + outMetadata[TarSplitInfoKey] = fmt.Sprintf("%d:%d:%d", tarSplitOffset, len(tarSplitData.Data), tarSplitData.UncompressedSize) + if err := appendZstdSkippableFrame(dest, tarSplitData.Data); err != nil { + return err + } + // Store the offset to the manifest and its size in LE order manifestDataLE := make([]byte, FooterSizeSupported) binary.LittleEndian.PutUint64(manifestDataLE, manifestOffset) - binary.LittleEndian.PutUint64(manifestDataLE[8:], uint64(len(compressedManifest))) - binary.LittleEndian.PutUint64(manifestDataLE[16:], uint64(len(manifest))) - binary.LittleEndian.PutUint64(manifestDataLE[24:], uint64(ManifestTypeCRFS)) - copy(manifestDataLE[32:], ZstdChunkedFrameMagic) + binary.LittleEndian.PutUint64(manifestDataLE[8*1:], uint64(len(compressedManifest))) + 
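The tar-split frame is appended between the manifest frame and the footer, so its offset is the manifest offset plus the compressed manifest length plus one more 8-byte skippable-frame header; the triple is then published in the tarsplit-position annotation for fmt.Sscanf on the read side. The arithmetic, with illustrative numbers:

package main

import "fmt"

// Every zstd skippable frame is preceded by an 8-byte header
// (magic + size), mirroring WriteZstdChunkedManifest.
const zstdSkippableFrameHeader = 8

func main() {
	offset := uint64(9000) // where the manifest's skippable frame begins
	compressedManifestLen := uint64(4096)
	tarSplitLen, tarSplitUncompressed := uint64(2048), int64(16384)

	manifestOffset := offset + zstdSkippableFrameHeader
	tarSplitOffset := manifestOffset + compressedManifestLen + zstdSkippableFrameHeader

	// The annotation the reader later parses with fmt.Sscanf("%d:%d:%d", ...).
	fmt.Printf("tarsplit-position = %d:%d:%d\n", tarSplitOffset, tarSplitLen, tarSplitUncompressed)
}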
binary.LittleEndian.PutUint64(manifestDataLE[8*2:], uint64(len(manifest))) + binary.LittleEndian.PutUint64(manifestDataLE[8*3:], uint64(ManifestTypeCRFS)) + copy(manifestDataLE[8*4:], ZstdChunkedFrameMagic) return appendZstdSkippableFrame(dest, manifestDataLE) } diff --git a/vendor/github.com/containers/storage/pkg/chunked/storage_linux.go b/vendor/github.com/containers/storage/pkg/chunked/storage_linux.go index adc1ad398..a80b28fb5 100644 --- a/vendor/github.com/containers/storage/pkg/chunked/storage_linux.go +++ b/vendor/github.com/containers/storage/pkg/chunked/storage_linux.go @@ -55,6 +55,7 @@ type compressedFileType int type chunkedDiffer struct { stream ImageSourceSeekable manifest []byte + tarSplit []byte layersCache *layersCache tocOffset int64 fileType compressedFileType @@ -64,6 +65,8 @@ type chunkedDiffer struct { gzipReader *pgzip.Reader zstdReader *zstd.Decoder rawReader io.Reader + + tocDigest digest.Digest } var xattrsToIgnore = map[string]interface{}{ @@ -135,6 +138,26 @@ func copyFileContent(srcFd int, destFile string, dirfd int, mode os.FileMode, us return dstFile, st.Size(), nil } +// GetTOCDigest returns the digest of the TOC as recorded in the annotations. +// This is an experimental feature and may be changed/removed in the future. +func GetTOCDigest(annotations map[string]string) (*digest.Digest, error) { + if tocDigest, ok := annotations[estargz.TOCJSONDigestAnnotation]; ok { + d, err := digest.Parse(tocDigest) + if err != nil { + return nil, err + } + return &d, nil + } + if tocDigest, ok := annotations[internal.ManifestChecksumKey]; ok { + d, err := digest.Parse(tocDigest) + if err != nil { + return nil, err + } + return &d, nil + } + return nil, nil +} + // GetDiffer returns a differ than can be used with ApplyDiffWithDiffer. 
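GetTOCDigest checks the estargz annotation first and the zstd:chunked manifest checksum second, returning nil when neither is present, since a layer without a TOC is not an error. The lookup order in plain form; the estargz key shown is the well-known stargz-snapshotter value and should be treated as an assumption here:

package main

import "fmt"

const (
	estargzTOCDigestAnnotation = "containerd.io/snapshot/stargz/toc.digest" // assumed value
	manifestChecksumKey        = "io.github.containers.zstd-chunked.manifest-checksum"
)

// tocDigest mirrors GetTOCDigest's lookup order: estargz first, then
// zstd:chunked; (".", false) means "no TOC present", which is not an error.
func tocDigest(annotations map[string]string) (string, bool) {
	if d, ok := annotations[estargzTOCDigestAnnotation]; ok {
		return d, true
	}
	if d, ok := annotations[manifestChecksumKey]; ok {
		return d, true
	}
	return "", false
}

func main() {
	ann := map[string]string{manifestChecksumKey: "sha256:abc123"}
	if d, ok := tocDigest(ann); ok {
		fmt.Println("TOC digest:", d)
	}
}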
func GetDiffer(ctx context.Context, store storage.Store, blobSize int64, annotations map[string]string, iss ImageSourceSeekable) (graphdriver.Differ, error) { if _, ok := annotations[internal.ManifestChecksumKey]; ok { @@ -147,7 +170,7 @@ func GetDiffer(ctx context.Context, store storage.Store, blobSize int64, annotat } func makeZstdChunkedDiffer(ctx context.Context, store storage.Store, blobSize int64, annotations map[string]string, iss ImageSourceSeekable) (*chunkedDiffer, error) { - manifest, tocOffset, err := readZstdChunkedManifest(ctx, iss, blobSize, annotations) + manifest, tarSplit, tocOffset, err := readZstdChunkedManifest(ctx, iss, blobSize, annotations) if err != nil { return nil, fmt.Errorf("read zstd:chunked manifest: %w", err) } @@ -156,13 +179,20 @@ func makeZstdChunkedDiffer(ctx context.Context, store storage.Store, blobSize in return nil, err } + tocDigest, err := digest.Parse(annotations[internal.ManifestChecksumKey]) + if err != nil { + return nil, fmt.Errorf("parse TOC digest %q: %w", annotations[internal.ManifestChecksumKey], err) + } + return &chunkedDiffer{ copyBuffer: makeCopyBuffer(), - stream: iss, - manifest: manifest, + fileType: fileTypeZstdChunked, layersCache: layersCache, + manifest: manifest, + stream: iss, + tarSplit: tarSplit, tocOffset: tocOffset, - fileType: fileTypeZstdChunked, + tocDigest: tocDigest, }, nil } @@ -176,6 +206,11 @@ func makeEstargzChunkedDiffer(ctx context.Context, store storage.Store, blobSize return nil, err } + tocDigest, err := digest.Parse(annotations[estargz.TOCJSONDigestAnnotation]) + if err != nil { + return nil, fmt.Errorf("parse TOC digest %q: %w", annotations[estargz.TOCJSONDigestAnnotation], err) + } + return &chunkedDiffer{ copyBuffer: makeCopyBuffer(), stream: iss, @@ -183,6 +218,7 @@ func makeEstargzChunkedDiffer(ctx context.Context, store storage.Store, blobSize layersCache: layersCache, tocOffset: tocOffset, fileType: fileTypeEstargz, + tocDigest: tocDigest, }, nil } @@ -363,6 +399,24 @@ func maybeDoIDRemap(manifest []internal.FileMetadata, options *archive.TarOption return nil } +func mapToSlice(inputMap map[uint32]struct{}) []uint32 { + var out []uint32 + for value := range inputMap { + out = append(out, value) + } + return out +} + +func collectIDs(entries []internal.FileMetadata) ([]uint32, []uint32) { + uids := make(map[uint32]struct{}) + gids := make(map[uint32]struct{}) + for _, entry := range entries { + uids[uint32(entry.UID)] = struct{}{} + gids[uint32(entry.GID)] = struct{}{} + } + return mapToSlice(uids), mapToSlice(gids) +} + type originFile struct { Root string Path string @@ -558,7 +612,7 @@ func openFileUnderRootFallback(dirfd int, name string, flags uint64, mode os.Fil func openFileUnderRootOpenat2(dirfd int, name string, flags uint64, mode os.FileMode) (int, error) { how := unix.OpenHow{ Flags: flags, - Mode: uint64(mode & 07777), + Mode: uint64(mode & 0o7777), Resolve: unix.RESOLVE_IN_ROOT, } return unix.Openat2(dirfd, name, &how) @@ -636,7 +690,7 @@ func openOrCreateDirUnderRoot(name string, dirfd int, mode os.FileMode) (*os.Fil baseName := filepath.Base(name) - if err2 := unix.Mkdirat(int(pDir.Fd()), baseName, 0755); err2 != nil { + if err2 := unix.Mkdirat(int(pDir.Fd()), baseName, 0o755); err2 != nil { return nil, err } @@ -1271,12 +1325,13 @@ func (c *chunkedDiffer) ApplyDiff(dest string, options *archive.TarOptions) (gra } }() - bigData := map[string][]byte{ - bigDataKey: c.manifest, - } output := graphdriver.DriverWithDifferOutput{ - Differ: c, - BigData: bigData, + Differ: c, + TarSplit: 
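collectIDs and mapToSlice gather the distinct UIDs and GIDs across all TOC entries with maps used as sets, so ApplyDiff can report them in the differ output. Reproduced in runnable form with a simplified entry type:

package main

import "fmt"

type fileMetadata struct {
	Name string
	UID  int
	GID  int
}

// collectIDs mirrors the diff's helper: gather the distinct UIDs and
// GIDs of all entries using maps as sets, then flatten to slices.
func collectIDs(entries []fileMetadata) (uids, gids []uint32) {
	uidSet := make(map[uint32]struct{})
	gidSet := make(map[uint32]struct{})
	for _, e := range entries {
		uidSet[uint32(e.UID)] = struct{}{}
		gidSet[uint32(e.GID)] = struct{}{}
	}
	for u := range uidSet {
		uids = append(uids, u)
	}
	for g := range gidSet {
		gids = append(gids, g)
	}
	return uids, gids
}

func main() {
	entries := []fileMetadata{{"a", 0, 0}, {"b", 1000, 1000}, {"c", 1000, 0}}
	uids, gids := collectIDs(entries)
	fmt.Println(len(uids), len(gids)) // 2 2
}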
c.tarSplit, + BigData: map[string][]byte{ + bigDataKey: c.manifest, + }, + TOCDigest: c.tocDigest, } storeOpts, err := types.DefaultStoreOptionsAutoDetectUID() @@ -1305,6 +1360,8 @@ func (c *chunkedDiffer) ApplyDiff(dest string, options *archive.TarOptions) (gra var missingParts []missingPart + output.UIDs, output.GIDs = collectIDs(toc.Entries) + mergedEntries, totalSize, err := c.mergeTocEntries(c.fileType, toc.Entries) if err != nil { return output, err @@ -1384,7 +1441,7 @@ func (c *chunkedDiffer) ApplyDiff(dest string, options *archive.TarOptions) (gra filesToWaitFor := 0 for i, r := range mergedEntries { if options.ForceMask != nil { - value := fmt.Sprintf("%d:%d:0%o", r.UID, r.GID, r.Mode&07777) + value := fmt.Sprintf("%d:%d:0%o", r.UID, r.GID, r.Mode&0o7777) r.Xattrs[containersOverrideXattr] = base64.StdEncoding.EncodeToString([]byte(value)) r.Mode = int64(*options.ForceMask) } @@ -1579,6 +1636,7 @@ func (c *chunkedDiffer) ApplyDiff(dest string, options *archive.TarOptions) (gra if totalChunksSize > 0 { logrus.Debugf("Missing %d bytes out of %d (%.2f %%)", missingPartsSize, totalChunksSize, float32(missingPartsSize*100.0)/float32(totalChunksSize)) } + return output, nil } diff --git a/vendor/github.com/containers/storage/pkg/chunked/storage_unsupported.go b/vendor/github.com/containers/storage/pkg/chunked/storage_unsupported.go index 4d952aba3..cc37ab1d8 100644 --- a/vendor/github.com/containers/storage/pkg/chunked/storage_unsupported.go +++ b/vendor/github.com/containers/storage/pkg/chunked/storage_unsupported.go @@ -9,9 +9,16 @@ import ( storage "github.com/containers/storage" graphdriver "github.com/containers/storage/drivers" + digest "github.com/opencontainers/go-digest" ) // GetDiffer returns a differ than can be used with ApplyDiffWithDiffer. func GetDiffer(ctx context.Context, store storage.Store, blobSize int64, annotations map[string]string, iss ImageSourceSeekable) (graphdriver.Differ, error) { - return nil, errors.New("format not supported on this architecture") + return nil, errors.New("format not supported on this system") +} + +// GetTOCDigest returns the digest of the TOC as recorded in the annotations. +// This is an experimental feature and may be changed/removed in the future. +func GetTOCDigest(annotations map[string]string) (*digest.Digest, error) { + return nil, errors.New("format not supported on this system") } diff --git a/vendor/github.com/containers/storage/pkg/config/config.go b/vendor/github.com/containers/storage/pkg/config/config.go index f6e0cfcfe..20d72ca89 100644 --- a/vendor/github.com/containers/storage/pkg/config/config.go +++ b/vendor/github.com/containers/storage/pkg/config/config.go @@ -124,6 +124,11 @@ type OptionsConfig struct { // for shared image content AdditionalImageStores []string `toml:"additionalimagestores,omitempty"` + // ImageStore is the location of image store which is separated from the + // container store. Usually this is not recommended unless users wants + // separate store for image and containers. + ImageStore string `toml:"imagestore,omitempty"` + // AdditionalLayerStores is the location of additional read/only // Layer stores. 
Usually used to access Networked File System // for shared image content diff --git a/vendor/github.com/containers/storage/pkg/devicemapper/devmapper.go b/vendor/github.com/containers/storage/pkg/devicemapper/devmapper.go index 6b9a7afcd..33bf7184e 100644 --- a/vendor/github.com/containers/storage/pkg/devicemapper/devmapper.go +++ b/vendor/github.com/containers/storage/pkg/devicemapper/devmapper.go @@ -239,8 +239,8 @@ func (t *Task) getDriverVersion() (string, error) { } func (t *Task) getNextTarget(next unsafe.Pointer) (nextPtr unsafe.Pointer, start uint64, - length uint64, targetType string, params string) { - + length uint64, targetType string, params string, +) { return DmGetNextTarget(t.unmanaged, next, &start, &length, &targetType, ¶ms), start, length, targetType, params @@ -345,8 +345,7 @@ func RemoveDeviceDeferred(name string) error { // disable udev dm rules and delete the symlink under /dev/mapper by itself, // even if the removal is deferred by the kernel. cookie := new(uint) - var flags uint16 - flags = DmUdevDisableLibraryFallback + flags := uint16(DmUdevDisableLibraryFallback) if err := task.setCookie(cookie, flags); err != nil { return fmt.Errorf("devicemapper: Can not set cookie: %s", err) } @@ -384,7 +383,7 @@ func CancelDeferredRemove(deviceName string) error { return fmt.Errorf("devicemapper: Can't set sector %s", err) } - if err := task.setMessage(fmt.Sprintf("@cancel_deferred_remove")); err != nil { + if err := task.setMessage("@cancel_deferred_remove"); err != nil { return fmt.Errorf("devicemapper: Can't set message %s", err) } @@ -459,8 +458,7 @@ func CreatePool(poolName string, dataFile, metadataFile *os.File, poolBlockSize } cookie := new(uint) - var flags uint16 - flags = DmUdevDisableSubsystemRulesFlag | DmUdevDisableDiskRulesFlag | DmUdevDisableOtherRulesFlag + flags := uint16(DmUdevDisableSubsystemRulesFlag | DmUdevDisableDiskRulesFlag | DmUdevDisableOtherRulesFlag) if err := task.setCookie(cookie, flags); err != nil { return fmt.Errorf("devicemapper: Can't set cookie %s", err) } diff --git a/vendor/github.com/containers/storage/pkg/devicemapper/devmapper_wrapper.go b/vendor/github.com/containers/storage/pkg/devicemapper/devmapper_wrapper.go index 7baca8126..9aef4c2fb 100644 --- a/vendor/github.com/containers/storage/pkg/devicemapper/devmapper_wrapper.go +++ b/vendor/github.com/containers/storage/pkg/devicemapper/devmapper_wrapper.go @@ -138,8 +138,8 @@ func dmTaskSetRoFct(task *cdmTask) int { } func dmTaskAddTargetFct(task *cdmTask, - start, size uint64, ttype, params string) int { - + start, size uint64, ttype, params string, +) int { Cttype := C.CString(ttype) defer free(Cttype) @@ -156,12 +156,11 @@ func dmTaskGetDepsFct(task *cdmTask) *Deps { } // golang issue: https://github.com/golang/go/issues/11925 - hdr := reflect.SliceHeader{ - Data: uintptr(unsafe.Pointer(uintptr(unsafe.Pointer(Cdeps)) + unsafe.Sizeof(*Cdeps))), - Len: int(Cdeps.count), - Cap: int(Cdeps.count), - } - devices := *(*[]C.uint64_t)(unsafe.Pointer(&hdr)) + var devices []C.uint64_t + devicesHdr := (*reflect.SliceHeader)(unsafe.Pointer(&devices)) + devicesHdr.Data = uintptr(unsafe.Pointer(uintptr(unsafe.Pointer(Cdeps)) + unsafe.Sizeof(*Cdeps))) + devicesHdr.Len = int(Cdeps.count) + devicesHdr.Cap = int(Cdeps.count) deps := &Deps{ Count: uint32(Cdeps.count), diff --git a/vendor/github.com/containers/storage/pkg/fileutils/fileutils.go b/vendor/github.com/containers/storage/pkg/fileutils/fileutils.go index bcc2109b6..9d0714b1b 100644 --- 
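The dmTaskGetDepsFct change swaps a constructed reflect.SliceHeader literal, which go vet flags because the garbage collector cannot see through it, for the sanctioned pattern of writing the header of a real slice variable in place. The same trick with a Go array standing in for the C-allocated deps memory:

package main

import (
	"fmt"
	"reflect"
	"unsafe"
)

func main() {
	// Backing memory, standing in for the C-allocated deps array.
	raw := [4]uint64{10, 20, 30, 40}

	// The patched pattern: declare a real slice, then fill in its header
	// in place. Building a reflect.SliceHeader value and casting it (the
	// old code) produces a slice whose backing store is invisible to the
	// GC, which go vet reports as misuse of unsafe.
	var devices []uint64
	hdr := (*reflect.SliceHeader)(unsafe.Pointer(&devices))
	hdr.Data = uintptr(unsafe.Pointer(&raw[0]))
	hdr.Len = len(raw)
	hdr.Cap = len(raw)

	fmt.Println(devices) // [10 20 30 40]
}

On Go 1.17+ the cleaner route is unsafe.Slice(&raw[0], len(raw)), but the in-place header write matches what the vendored code does.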
a/vendor/github.com/containers/storage/pkg/fileutils/fileutils.go +++ b/vendor/github.com/containers/storage/pkg/fileutils/fileutils.go @@ -183,7 +183,6 @@ func (p *Pattern) Exclusion() bool { } func (p *Pattern) match(path string) (bool, error) { - if p.regexp == nil { if err := p.compile(); err != nil { return false, filepath.ErrBadPattern @@ -356,12 +355,12 @@ func CreateIfNotExists(path string, isDir bool) error { if _, err := os.Stat(path); err != nil { if os.IsNotExist(err) { if isDir { - return os.MkdirAll(path, 0755) + return os.MkdirAll(path, 0o755) } - if err := os.MkdirAll(filepath.Dir(path), 0755); err != nil { + if err := os.MkdirAll(filepath.Dir(path), 0o755); err != nil { return err } - f, err := os.OpenFile(path, os.O_CREATE, 0755) + f, err := os.OpenFile(path, os.O_CREATE, 0o755) if err != nil { return err } diff --git a/vendor/github.com/containers/storage/pkg/idmap/idmapped_utils.go b/vendor/github.com/containers/storage/pkg/idmap/idmapped_utils.go index f52239f87..68c8c867d 100644 --- a/vendor/github.com/containers/storage/pkg/idmap/idmapped_utils.go +++ b/vendor/github.com/containers/storage/pkg/idmap/idmapped_utils.go @@ -104,7 +104,7 @@ func CreateIDMappedMount(source, target string, pid int) error { &attr, uint(unsafe.Sizeof(attr))); err != nil { return err } - if err := os.Mkdir(target, 0700); err != nil && !os.IsExist(err) { + if err := os.Mkdir(target, 0o700); err != nil && !os.IsExist(err) { return err } return moveMount(targetDirFd, target) @@ -140,7 +140,7 @@ func CreateUsernsProcess(uidMaps []idtools.IDMap, gidMaps []idtools.IDMap) (int, for _, m := range idmap { mappings = mappings + fmt.Sprintf("%d %d %d\n", m.ContainerID, m.HostID, m.Size) } - return os.WriteFile(fmt.Sprintf("/proc/%d/%s", pid, fname), []byte(mappings), 0600) + return os.WriteFile(fmt.Sprintf("/proc/%d/%s", pid, fname), []byte(mappings), 0o600) } if err := writeMappings("uid_map", uidMaps); err != nil { cleanupFunc() diff --git a/vendor/github.com/containers/storage/pkg/idtools/idtools_unix.go b/vendor/github.com/containers/storage/pkg/idtools/idtools_unix.go index daff1e4a9..4701dc5ac 100644 --- a/vendor/github.com/containers/storage/pkg/idtools/idtools_unix.go +++ b/vendor/github.com/containers/storage/pkg/idtools/idtools_unix.go @@ -91,13 +91,13 @@ func CanAccess(path string, pair IDPair) bool { } func accessible(isOwner, isGroup bool, perms os.FileMode) bool { - if isOwner && (perms&0100 == 0100) { + if isOwner && (perms&0o100 == 0o100) { return true } - if isGroup && (perms&0010 == 0010) { + if isGroup && (perms&0o010 == 0o010) { return true } - if perms&0001 == 0001 { + if perms&0o001 == 0o001 { return true } return false diff --git a/vendor/github.com/containers/storage/pkg/idtools/usergroupadd_linux.go b/vendor/github.com/containers/storage/pkg/idtools/usergroupadd_linux.go index 40e507f77..ac27718de 100644 --- a/vendor/github.com/containers/storage/pkg/idtools/usergroupadd_linux.go +++ b/vendor/github.com/containers/storage/pkg/idtools/usergroupadd_linux.go @@ -89,7 +89,6 @@ func addUser(userName string) error { } func createSubordinateRanges(name string) error { - // first, we should verify that ranges weren't automatically created // by the distro tooling ranges, err := readSubuid(name) diff --git a/vendor/github.com/containers/storage/pkg/idtools/utils_unix.go b/vendor/github.com/containers/storage/pkg/idtools/utils_unix.go index 33a7dee6c..b3772bdb3 100644 --- a/vendor/github.com/containers/storage/pkg/idtools/utils_unix.go +++ 
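The accessible helper distills the Unix permission check: the caller needs the execute/search bit in whichever class applies, owner (0o100), group (0o010), or other (0o001). Verbatim in runnable form:

package main

import (
	"fmt"
	"os"
)

// accessible mirrors the idtools check: owner needs the 0o100 bit,
// group the 0o010 bit, and everyone else falls back to 0o001.
func accessible(isOwner, isGroup bool, perms os.FileMode) bool {
	if isOwner && perms&0o100 == 0o100 {
		return true
	}
	if isGroup && perms&0o010 == 0o010 {
		return true
	}
	return perms&0o001 == 0o001
}

func main() {
	fmt.Println(accessible(true, false, 0o700))  // true: owner has u+x
	fmt.Println(accessible(false, true, 0o700))  // false: group lacks g+x
	fmt.Println(accessible(false, false, 0o711)) // true: world has o+x
}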
b/vendor/github.com/containers/storage/pkg/idtools/utils_unix.go @@ -19,8 +19,8 @@ func resolveBinary(binname string) (string, error) { if err != nil { return "", err } - //only return no error if the final resolved binary basename - //matches what was searched for + // only return no error if the final resolved binary basename + // matches what was searched for if filepath.Base(resolvedPath) == binname { return resolvedPath, nil } diff --git a/vendor/github.com/containers/storage/pkg/lockfile/lockfile_unix.go b/vendor/github.com/containers/storage/pkg/lockfile/lockfile_unix.go index 335980914..a357b809e 100644 --- a/vendor/github.com/containers/storage/pkg/lockfile/lockfile_unix.go +++ b/vendor/github.com/containers/storage/pkg/lockfile/lockfile_unix.go @@ -135,7 +135,7 @@ func openLock(path string, ro bool) (fd int, err error) { // the directory of the lockfile seems to be removed, try to create it if os.IsNotExist(err) { - if err := os.MkdirAll(filepath.Dir(path), 0700); err != nil { + if err := os.MkdirAll(filepath.Dir(path), 0o700); err != nil { return fd, fmt.Errorf("creating lock file directory: %w", err) } diff --git a/vendor/github.com/containers/storage/pkg/lockfile/lockfile_windows.go b/vendor/github.com/containers/storage/pkg/lockfile/lockfile_windows.go index 09f2aca5c..ca27a483d 100644 --- a/vendor/github.com/containers/storage/pkg/lockfile/lockfile_windows.go +++ b/vendor/github.com/containers/storage/pkg/lockfile/lockfile_windows.go @@ -138,6 +138,7 @@ func (l *LockFile) Modified() (bool, error) { func (l *LockFile) Touch() error { return nil } + func (l *LockFile) IsReadWrite() bool { return false } diff --git a/vendor/github.com/containers/storage/pkg/loopback/attach_loopback.go b/vendor/github.com/containers/storage/pkg/loopback/attach_loopback.go index de10e3324..b8bfa5897 100644 --- a/vendor/github.com/containers/storage/pkg/loopback/attach_loopback.go +++ b/vendor/github.com/containers/storage/pkg/loopback/attach_loopback.go @@ -26,7 +26,7 @@ func stringToLoopName(src string) [LoNameSize]uint8 { } func getNextFreeLoopbackIndex() (int, error) { - f, err := os.OpenFile("/dev/loop-control", os.O_RDONLY, 0644) + f, err := os.OpenFile("/dev/loop-control", os.O_RDONLY, 0o644) if err != nil { return 0, err } @@ -67,7 +67,7 @@ func openNextAvailableLoopback(index int, sparseName string, sparseFile *os.File } // OpenFile adds O_CLOEXEC - loopFile, err = os.OpenFile(target, os.O_RDWR, 0644) + loopFile, err = os.OpenFile(target, os.O_RDWR, 0o644) if err != nil { logrus.Errorf("Opening loopback device: %s", err) return nil, ErrAttachLoopbackDevice @@ -114,7 +114,6 @@ func openNextAvailableLoopback(index int, sparseName string, sparseFile *os.File // AttachLoopDevice attaches the given sparse file to the next // available loopback device. It returns an opened *os.File. func AttachLoopDevice(sparseName string) (loop *os.File, err error) { - // Try to retrieve the next available loopback device via syscall. // If it fails, we discard error and start looping for a // loopback from index 0. 
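openLock only creates the lock file's parent directory after the first open fails with ENOENT, rather than paying for MkdirAll on every open. The same retry shape as a standalone function:

package main

import (
	"fmt"
	"os"
	"path/filepath"
)

// openCreating opens path for writing, creating the parent directory
// only if the first attempt fails because it does not exist yet.
func openCreating(path string) (*os.File, error) {
	f, err := os.OpenFile(path, os.O_CREATE|os.O_RDWR, 0o600)
	if err == nil || !os.IsNotExist(err) {
		return f, err
	}
	if err := os.MkdirAll(filepath.Dir(path), 0o700); err != nil {
		return nil, fmt.Errorf("creating lock file directory: %w", err)
	}
	return os.OpenFile(path, os.O_CREATE|os.O_RDWR, 0o600)
}

func main() {
	dir, _ := os.MkdirTemp("", "locks")
	defer os.RemoveAll(dir)
	f, err := openCreating(filepath.Join(dir, "nested", "store.lock"))
	if err != nil {
		panic(err)
	}
	defer f.Close()
	fmt.Println("opened", f.Name())
}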
@@ -124,7 +123,7 @@ func AttachLoopDevice(sparseName string) (loop *os.File, err error) { } // OpenFile adds O_CLOEXEC - sparseFile, err := os.OpenFile(sparseName, os.O_RDWR, 0644) + sparseFile, err := os.OpenFile(sparseName, os.O_RDWR, 0o644) if err != nil { logrus.Errorf("Opening sparse file: %v", err) return nil, ErrAttachLoopbackDevice diff --git a/vendor/github.com/containers/storage/pkg/parsers/kernel/kernel_windows.go b/vendor/github.com/containers/storage/pkg/parsers/kernel/kernel_windows.go index b30da9fad..4b7fdee83 100644 --- a/vendor/github.com/containers/storage/pkg/parsers/kernel/kernel_windows.go +++ b/vendor/github.com/containers/storage/pkg/parsers/kernel/kernel_windows.go @@ -24,7 +24,6 @@ func (k *VersionInfo) String() string { // GetKernelVersion gets the current kernel version. func GetKernelVersion() (*VersionInfo, error) { - var ( h windows.Handle dwVersion uint32 diff --git a/vendor/github.com/containers/storage/pkg/regexp/regexp.go b/vendor/github.com/containers/storage/pkg/regexp/regexp.go index 458b83378..1a3333dba 100644 --- a/vendor/github.com/containers/storage/pkg/regexp/regexp.go +++ b/vendor/github.com/containers/storage/pkg/regexp/regexp.go @@ -11,22 +11,27 @@ import ( // of apps that want to use global regex variables. This library initializes them on // first use as opposed to the start of the executable. type Regexp struct { + *regexpStruct +} + +type regexpStruct struct { + _ noCopy once sync.Once regexp *regexp.Regexp val string } func Delayed(val string) Regexp { - re := Regexp{ + re := &regexpStruct{ val: val, } if precompile { re.regexp = regexp.MustCompile(re.val) } - return re + return Regexp{re} } -func (re *Regexp) compile() { +func (re *regexpStruct) compile() { if precompile { return } @@ -35,180 +40,195 @@ func (re *Regexp) compile() { }) } -func (re *Regexp) Expand(dst []byte, template []byte, src []byte, match []int) []byte { +func (re *regexpStruct) Expand(dst []byte, template []byte, src []byte, match []int) []byte { re.compile() return re.regexp.Expand(dst, template, src, match) } -func (re *Regexp) ExpandString(dst []byte, template string, src string, match []int) []byte { +func (re *regexpStruct) ExpandString(dst []byte, template string, src string, match []int) []byte { re.compile() return re.regexp.ExpandString(dst, template, src, match) } -func (re *Regexp) Find(b []byte) []byte { + +func (re *regexpStruct) Find(b []byte) []byte { re.compile() return re.regexp.Find(b) } -func (re *Regexp) FindAll(b []byte, n int) [][]byte { +func (re *regexpStruct) FindAll(b []byte, n int) [][]byte { re.compile() return re.regexp.FindAll(b, n) } -func (re *Regexp) FindAllIndex(b []byte, n int) [][]int { +func (re *regexpStruct) FindAllIndex(b []byte, n int) [][]int { re.compile() return re.regexp.FindAllIndex(b, n) } -func (re *Regexp) FindAllString(s string, n int) []string { +func (re *regexpStruct) FindAllString(s string, n int) []string { re.compile() return re.regexp.FindAllString(s, n) } -func (re *Regexp) FindAllStringIndex(s string, n int) [][]int { +func (re *regexpStruct) FindAllStringIndex(s string, n int) [][]int { re.compile() return re.regexp.FindAllStringIndex(s, n) } -func (re *Regexp) FindAllStringSubmatch(s string, n int) [][]string { +func (re *regexpStruct) FindAllStringSubmatch(s string, n int) [][]string { re.compile() return re.regexp.FindAllStringSubmatch(s, n) } -func (re *Regexp) FindAllStringSubmatchIndex(s string, n int) [][]int { +func (re *regexpStruct) FindAllStringSubmatchIndex(s string, n int) [][]int { re.compile()
return re.regexp.FindAllStringSubmatchIndex(s, n) } -func (re *Regexp) FindAllSubmatch(b []byte, n int) [][][]byte { +func (re *regexpStruct) FindAllSubmatch(b []byte, n int) [][][]byte { re.compile() return re.regexp.FindAllSubmatch(b, n) } -func (re *Regexp) FindAllSubmatchIndex(b []byte, n int) [][]int { +func (re *regexpStruct) FindAllSubmatchIndex(b []byte, n int) [][]int { re.compile() return re.regexp.FindAllSubmatchIndex(b, n) } -func (re *Regexp) FindIndex(b []byte) (loc []int) { +func (re *regexpStruct) FindIndex(b []byte) (loc []int) { re.compile() return re.regexp.FindIndex(b) } -func (re *Regexp) FindReaderIndex(r io.RuneReader) (loc []int) { +func (re *regexpStruct) FindReaderIndex(r io.RuneReader) (loc []int) { re.compile() return re.regexp.FindReaderIndex(r) } -func (re *Regexp) FindReaderSubmatchIndex(r io.RuneReader) []int { +func (re *regexpStruct) FindReaderSubmatchIndex(r io.RuneReader) []int { re.compile() return re.regexp.FindReaderSubmatchIndex(r) } -func (re *Regexp) FindString(s string) string { +func (re *regexpStruct) FindString(s string) string { re.compile() return re.regexp.FindString(s) } -func (re *Regexp) FindStringIndex(s string) (loc []int) { +func (re *regexpStruct) FindStringIndex(s string) (loc []int) { re.compile() return re.regexp.FindStringIndex(s) } -func (re *Regexp) FindStringSubmatch(s string) []string { +func (re *regexpStruct) FindStringSubmatch(s string) []string { re.compile() return re.regexp.FindStringSubmatch(s) } -func (re *Regexp) FindStringSubmatchIndex(s string) []int { +func (re *regexpStruct) FindStringSubmatchIndex(s string) []int { re.compile() return re.regexp.FindStringSubmatchIndex(s) } -func (re *Regexp) FindSubmatch(b []byte) [][]byte { +func (re *regexpStruct) FindSubmatch(b []byte) [][]byte { re.compile() return re.regexp.FindSubmatch(b) } -func (re *Regexp) FindSubmatchIndex(b []byte) []int { +func (re *regexpStruct) FindSubmatchIndex(b []byte) []int { re.compile() return re.regexp.FindSubmatchIndex(b) } -func (re *Regexp) LiteralPrefix() (prefix string, complete bool) { +func (re *regexpStruct) LiteralPrefix() (prefix string, complete bool) { re.compile() return re.regexp.LiteralPrefix() } -func (re *Regexp) Longest() { +func (re *regexpStruct) Longest() { re.compile() re.regexp.Longest() } -func (re *Regexp) Match(b []byte) bool { +func (re *regexpStruct) Match(b []byte) bool { re.compile() return re.regexp.Match(b) } -func (re *Regexp) MatchReader(r io.RuneReader) bool { +func (re *regexpStruct) MatchReader(r io.RuneReader) bool { re.compile() return re.regexp.MatchReader(r) } -func (re *Regexp) MatchString(s string) bool { + +func (re *regexpStruct) MatchString(s string) bool { re.compile() return re.regexp.MatchString(s) } -func (re *Regexp) NumSubexp() int { +func (re *regexpStruct) NumSubexp() int { re.compile() return re.regexp.NumSubexp() } -func (re *Regexp) ReplaceAll(src, repl []byte) []byte { +func (re *regexpStruct) ReplaceAll(src, repl []byte) []byte { re.compile() return re.regexp.ReplaceAll(src, repl) } -func (re *Regexp) ReplaceAllFunc(src []byte, repl func([]byte) []byte) []byte { +func (re *regexpStruct) ReplaceAllFunc(src []byte, repl func([]byte) []byte) []byte { re.compile() return re.regexp.ReplaceAllFunc(src, repl) } -func (re *Regexp) ReplaceAllLiteral(src, repl []byte) []byte { +func (re *regexpStruct) ReplaceAllLiteral(src, repl []byte) []byte { re.compile() return re.regexp.ReplaceAllLiteral(src, repl) } -func (re *Regexp) ReplaceAllLiteralString(src, repl string) string { +func (re 
*regexpStruct) ReplaceAllLiteralString(src, repl string) string { re.compile() return re.regexp.ReplaceAllLiteralString(src, repl) } -func (re *Regexp) ReplaceAllString(src, repl string) string { +func (re *regexpStruct) ReplaceAllString(src, repl string) string { re.compile() return re.regexp.ReplaceAllString(src, repl) } -func (re *Regexp) ReplaceAllStringFunc(src string, repl func(string) string) string { +func (re *regexpStruct) ReplaceAllStringFunc(src string, repl func(string) string) string { re.compile() return re.regexp.ReplaceAllStringFunc(src, repl) } -func (re *Regexp) Split(s string, n int) []string { +func (re *regexpStruct) Split(s string, n int) []string { re.compile() return re.regexp.Split(s, n) } -func (re *Regexp) String() string { +func (re *regexpStruct) String() string { re.compile() return re.regexp.String() } -func (re *Regexp) SubexpIndex(name string) int { +func (re *regexpStruct) SubexpIndex(name string) int { re.compile() return re.regexp.SubexpIndex(name) } -func (re *Regexp) SubexpNames() []string { +func (re *regexpStruct) SubexpNames() []string { re.compile() return re.regexp.SubexpNames() } + +// noCopy may be added to structs which must not be copied +// after the first use. +// +// See https://golang.org/issues/8005#issuecomment-190753527 +// for details. +// +// Note that it must not be embedded, due to the Lock and Unlock methods. +type noCopy struct{} + +// Lock is a no-op used by -copylocks checker from `go vet`. +func (*noCopy) Lock() {} +func (*noCopy) Unlock() {} diff --git a/vendor/github.com/containers/storage/pkg/stringid/stringid.go b/vendor/github.com/containers/storage/pkg/stringid/stringid.go index 3ae44fd8a..20abc7407 100644 --- a/vendor/github.com/containers/storage/pkg/stringid/stringid.go +++ b/vendor/github.com/containers/storage/pkg/stringid/stringid.go @@ -63,7 +63,7 @@ func generateID(r io.Reader) string { } } -// GenerateRandomID returns a unique id. +// GenerateRandomID returns a pseudorandom 64-character hex string. func GenerateRandomID() string { return generateID(cryptorand.Reader) } diff --git a/vendor/github.com/containers/storage/pkg/system/errors.go b/vendor/github.com/containers/storage/pkg/system/errors.go index 288318985..b87d419b5 100644 --- a/vendor/github.com/containers/storage/pkg/system/errors.go +++ b/vendor/github.com/containers/storage/pkg/system/errors.go @@ -4,7 +4,5 @@ import ( "errors" ) -var ( - // ErrNotSupportedPlatform means the platform is not supported. - ErrNotSupportedPlatform = errors.New("platform and architecture is not supported") -) +// ErrNotSupportedPlatform means the platform is not supported. +var ErrNotSupportedPlatform = errors.New("platform and architecture is not supported") diff --git a/vendor/github.com/containers/storage/pkg/system/init_windows.go b/vendor/github.com/containers/storage/pkg/system/init_windows.go index 019c66441..5f6fea1d3 100644 --- a/vendor/github.com/containers/storage/pkg/system/init_windows.go +++ b/vendor/github.com/containers/storage/pkg/system/init_windows.go @@ -13,5 +13,4 @@ func init() { if os.Getenv("LCOW_SUPPORTED") != "" { lcowSupported = true } - } diff --git a/vendor/github.com/containers/storage/pkg/system/meminfo_solaris.go b/vendor/github.com/containers/storage/pkg/system/meminfo_solaris.go index df53c40e2..a90b23e03 100644 --- a/vendor/github.com/containers/storage/pkg/system/meminfo_solaris.go +++ b/vendor/github.com/containers/storage/pkg/system/meminfo_solaris.go @@ -84,7 +84,6 @@ func getFreeMem() int64 { // // MemInfo type. 
func ReadMemInfo() (*MemInfo, error) { - ppKernel := C.getPpKernel() MemTotal := getTotalMem() MemFree := getFreeMem() diff --git a/vendor/github.com/containers/storage/pkg/system/path.go b/vendor/github.com/containers/storage/pkg/system/path.go index f634a6be6..ca076f2bc 100644 --- a/vendor/github.com/containers/storage/pkg/system/path.go +++ b/vendor/github.com/containers/storage/pkg/system/path.go @@ -17,5 +17,4 @@ func DefaultPathEnv(platform string) string { return "" } return defaultUnixPathEnv - } diff --git a/vendor/github.com/containers/storage/pkg/system/rm.go b/vendor/github.com/containers/storage/pkg/system/rm.go index 60c7d8bd9..5917fa251 100644 --- a/vendor/github.com/containers/storage/pkg/system/rm.go +++ b/vendor/github.com/containers/storage/pkg/system/rm.go @@ -30,6 +30,12 @@ func EnsureRemoveAll(dir string) error { exitOnErr := make(map[string]int) maxRetry := 100 + // Attempt a simple remove all first; this avoids the more expensive + // RecursiveUnmount call if not needed. + if err := os.RemoveAll(dir); err == nil { + return nil + } + // Attempt to unmount anything beneath this dir first if err := mount.RecursiveUnmount(dir); err != nil { logrus.Debugf("RecursiveUnmount on %s failed: %v", dir, err) diff --git a/vendor/github.com/containers/storage/pkg/system/stat_common.go b/vendor/github.com/containers/storage/pkg/system/stat_common.go index e965c54c2..2f44d18b6 100644 --- a/vendor/github.com/containers/storage/pkg/system/stat_common.go +++ b/vendor/github.com/containers/storage/pkg/system/stat_common.go @@ -3,8 +3,7 @@ package system -type platformStatT struct { -} +type platformStatT struct{} // Flags returns file flags if supported or zero otherwise func (s StatT) Flags() uint32 { diff --git a/vendor/github.com/containers/storage/pkg/system/stat_darwin.go b/vendor/github.com/containers/storage/pkg/system/stat_darwin.go index 715f05b93..57850a883 100644 --- a/vendor/github.com/containers/storage/pkg/system/stat_darwin.go +++ b/vendor/github.com/containers/storage/pkg/system/stat_darwin.go @@ -4,10 +4,12 @@ import "syscall" // fromStatT converts a syscall.Stat_t type to a system.Stat_t type func fromStatT(s *syscall.Stat_t) (*StatT, error) { - return &StatT{size: s.Size, + return &StatT{ + size: s.Size, mode: uint32(s.Mode), uid: s.Uid, gid: s.Gid, rdev: uint64(s.Rdev), - mtim: s.Mtimespec}, nil + mtim: s.Mtimespec, + }, nil } diff --git a/vendor/github.com/containers/storage/pkg/system/stat_freebsd.go b/vendor/github.com/containers/storage/pkg/system/stat_freebsd.go index 9c510468f..4b95073a3 100644 --- a/vendor/github.com/containers/storage/pkg/system/stat_freebsd.go +++ b/vendor/github.com/containers/storage/pkg/system/stat_freebsd.go @@ -13,13 +13,15 @@ func (s StatT) Flags() uint32 { // fromStatT converts a syscall.Stat_t type to a system.Stat_t type func fromStatT(s *syscall.Stat_t) (*StatT, error) { - st := &StatT{size: s.Size, + st := &StatT{ + size: s.Size, mode: uint32(s.Mode), uid: s.Uid, gid: s.Gid, rdev: uint64(s.Rdev), mtim: s.Mtimespec, - dev: s.Dev} + dev: s.Dev, + } st.flags = s.Flags st.dev = s.Dev return st, nil diff --git a/vendor/github.com/containers/storage/pkg/system/stat_linux.go b/vendor/github.com/containers/storage/pkg/system/stat_linux.go index e5dcba822..e3d13463f 100644 --- a/vendor/github.com/containers/storage/pkg/system/stat_linux.go +++ b/vendor/github.com/containers/storage/pkg/system/stat_linux.go @@ -4,13 +4,15 @@ import "syscall" // fromStatT converts a syscall.Stat_t type to a system.Stat_t type func fromStatT(s *syscall.Stat_t)
(*StatT, error) { - return &StatT{size: s.Size, + return &StatT{ + size: s.Size, mode: s.Mode, uid: s.Uid, gid: s.Gid, rdev: uint64(s.Rdev), mtim: s.Mtim, - dev: uint64(s.Dev)}, nil + dev: uint64(s.Dev), + }, nil } // FromStatT converts a syscall.Stat_t type to a system.Stat_t type diff --git a/vendor/github.com/containers/storage/pkg/system/stat_openbsd.go b/vendor/github.com/containers/storage/pkg/system/stat_openbsd.go index b607dea94..a413e1714 100644 --- a/vendor/github.com/containers/storage/pkg/system/stat_openbsd.go +++ b/vendor/github.com/containers/storage/pkg/system/stat_openbsd.go @@ -4,10 +4,12 @@ import "syscall" // fromStatT converts a syscall.Stat_t type to a system.Stat_t type func fromStatT(s *syscall.Stat_t) (*StatT, error) { - return &StatT{size: s.Size, + return &StatT{ + size: s.Size, mode: uint32(s.Mode), uid: s.Uid, gid: s.Gid, rdev: uint64(s.Rdev), - mtim: s.Mtim}, nil + mtim: s.Mtim, + }, nil } diff --git a/vendor/github.com/containers/storage/pkg/system/stat_solaris.go b/vendor/github.com/containers/storage/pkg/system/stat_solaris.go index b607dea94..a413e1714 100644 --- a/vendor/github.com/containers/storage/pkg/system/stat_solaris.go +++ b/vendor/github.com/containers/storage/pkg/system/stat_solaris.go @@ -4,10 +4,12 @@ import "syscall" // fromStatT converts a syscall.Stat_t type to a system.Stat_t type func fromStatT(s *syscall.Stat_t) (*StatT, error) { - return &StatT{size: s.Size, + return &StatT{ + size: s.Size, mode: uint32(s.Mode), uid: s.Uid, gid: s.Gid, rdev: uint64(s.Rdev), - mtim: s.Mtim}, nil + mtim: s.Mtim, + }, nil } diff --git a/vendor/github.com/containers/storage/pkg/system/stat_windows.go b/vendor/github.com/containers/storage/pkg/system/stat_windows.go index 81edaadbb..6d5c6c142 100644 --- a/vendor/github.com/containers/storage/pkg/system/stat_windows.go +++ b/vendor/github.com/containers/storage/pkg/system/stat_windows.go @@ -65,5 +65,6 @@ func fromStatT(fi *os.FileInfo) (*StatT, error) { return &StatT{ size: (*fi).Size(), mode: (*fi).Mode(), - mtim: (*fi).ModTime()}, nil + mtim: (*fi).ModTime(), + }, nil } diff --git a/vendor/github.com/containers/storage/pkg/truncindex/truncindex.go b/vendor/github.com/containers/storage/pkg/truncindex/truncindex.go index b81793adc..c14a5cc4d 100644 --- a/vendor/github.com/containers/storage/pkg/truncindex/truncindex.go +++ b/vendor/github.com/containers/storage/pkg/truncindex/truncindex.go @@ -102,9 +102,7 @@ func (idx *TruncIndex) Get(s string) (string, error) { if s == "" { return "", ErrEmptyPrefix } - var ( - id string - ) + var id string subTreeVisitFunc := func(prefix patricia.Prefix, item patricia.Item) error { if id != "" { // we haven't found the ID if there are two or more IDs diff --git a/vendor/github.com/containers/storage/pkg/unshare/unshare_freebsd.go b/vendor/github.com/containers/storage/pkg/unshare/unshare_freebsd.go index f52760abb..7a44ca301 100644 --- a/vendor/github.com/containers/storage/pkg/unshare/unshare_freebsd.go +++ b/vendor/github.com/containers/storage/pkg/unshare/unshare_freebsd.go @@ -59,7 +59,7 @@ func (c *Cmd) Start() error { if err != nil { pidRead.Close() pidWrite.Close() - return fmt.Errorf("creating pid pipe: %w", err) + return fmt.Errorf("creating continue read/write pipe: %w", err) } c.Env = append(c.Env, fmt.Sprintf("_Containers-continue-pipe=%d", len(c.ExtraFiles)+3)) c.ExtraFiles = append(c.ExtraFiles, continueRead) diff --git a/vendor/github.com/containers/storage/pkg/unshare/unshare_linux.go b/vendor/github.com/containers/storage/pkg/unshare/unshare_linux.go 
index 81cd67762..e169633d0 100644 --- a/vendor/github.com/containers/storage/pkg/unshare/unshare_linux.go +++ b/vendor/github.com/containers/storage/pkg/unshare/unshare_linux.go @@ -129,7 +129,7 @@ func (c *Cmd) Start() error { if err != nil { pidRead.Close() pidWrite.Close() - return fmt.Errorf("creating pid pipe: %w", err) + return fmt.Errorf("creating continue read/write pipe: %w", err) } c.Env = append(c.Env, fmt.Sprintf("_Containers-continue-pipe=%d", len(c.ExtraFiles)+3)) c.ExtraFiles = append(c.ExtraFiles, continueRead) diff --git a/vendor/github.com/containers/storage/storage.conf b/vendor/github.com/containers/storage/storage.conf index 99cc94a36..93a9a236e 100644 --- a/vendor/github.com/containers/storage/storage.conf +++ b/vendor/github.com/containers/storage/storage.conf @@ -55,7 +55,7 @@ additionalimagestores = [ # can deduplicate pulling of content, disk storage of content and can allow the # kernel to use less memory when running containers. -# containers/storage supports four keys +# containers/storage supports three keys # * enable_partial_images="true" | "false" # Tells containers/storage to look for files previously pulled in storage # rather then always pulling them from the container registry. @@ -75,8 +75,8 @@ pull_options = {enable_partial_images = "false", use_hard_links = "false", ostre # mappings which the kernel will allow when you later attempt to run a # container. # -# remap-uids = 0:1668442479:65536 -# remap-gids = 0:1668442479:65536 +# remap-uids = "0:1668442479:65536" +# remap-gids = "0:1668442479:65536" # Remap-User/Group is a user name which can be used to look up one or more UID/GID # ranges in the /etc/subuid or /etc/subgid file. Mappings are set up starting @@ -84,7 +84,8 @@ pull_options = {enable_partial_images = "false", use_hard_links = "false", ostre # range that matches the specified name, and using the length of that range. # Additional ranges are then assigned, using the ranges which specify the # lowest host-level IDs first, to the lowest not-yet-mapped in-container ID, -# until all of the entries have been used for maps. +# until all of the entries have been used for maps. This setting overrides the +# Remap-UIDs/GIDs setting. # # remap-user = "containers" # remap-group = "containers" @@ -100,7 +101,7 @@ pull_options = {enable_partial_images = "false", use_hard_links = "false", ostre # Auto-userns-min-size is the minimum size for a user namespace created automatically. # auto-userns-min-size=1024 # -# Auto-userns-max-size is the minimum size for a user namespace created automatically. +# Auto-userns-max-size is the maximum size for a user namespace created automatically. # auto-userns-max-size=65536 [storage.options.overlay] diff --git a/vendor/github.com/containers/storage/store.go b/vendor/github.com/containers/storage/store.go index 4c4082084..14c1edd7f 100644 --- a/vendor/github.com/containers/storage/store.go +++ b/vendor/github.com/containers/storage/store.go @@ -520,6 +520,13 @@ type Store interface { // references in the json files. These can happen in the case of unclean // shutdowns or regular restarts in transient store mode. GarbageCollect() error + + // Check returns a report of things that look wrong in the store. + Check(options *CheckOptions) (CheckReport, error) + // Repair attempts to remediate problems mentioned in the CheckReport, + // usually by deleting layers and images which are damaged. If the + // right options are set, it will remove containers as well. 
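The new Check/Repair surface is easiest to understand from the caller's side. A minimal sketch, assuming the CheckEverything and RepairEverything option constructors that the newly vendored check.go is expected to export (the helper below is hypothetical; verify those names against the vendored source):

package example

import (
	"fmt"

	"github.com/containers/storage"
)

// checkAndRepair runs a full consistency check on a store and then asks the
// store to repair whatever the report flagged. Hypothetical wrapper, not part
// of this patch; CheckEverything/RepairEverything are assumed helpers.
func checkAndRepair(store storage.Store) error {
	report, err := store.Check(storage.CheckEverything())
	if err != nil {
		return err
	}
	if errs := store.Repair(report, storage.RepairEverything()); len(errs) > 0 {
		return fmt.Errorf("repair left %d problems unresolved: %v", len(errs), errs)
	}
	return nil
}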
+ Repair(report CheckReport, options *RepairOptions) []error } // AdditionalLayer represents a layer that is contained in the additional layer store @@ -661,6 +668,7 @@ type store struct { usernsLock *lockfile.LockFile graphRoot string graphOptions []string + imageStoreDir string pullOptions map[string]string uidMap []idtools.IDMap gidMap []idtools.IDMap @@ -668,6 +676,7 @@ type store struct { autoNsMinSize uint32 autoNsMaxSize uint32 imageStore rwImageStore + rwImageStores []rwImageStore roImageStores []roImageStore containerStore rwContainerStore digestLockRoot string @@ -749,15 +758,25 @@ func GetStore(options types.StoreOptions) (Store, error) { options.RunRoot = defaultOpts.RunRoot } - if err := os.MkdirAll(options.RunRoot, 0700); err != nil { + if err := os.MkdirAll(options.RunRoot, 0o700); err != nil { return nil, err } - if err := os.MkdirAll(options.GraphRoot, 0700); err != nil { + if err := os.MkdirAll(options.GraphRoot, 0o700); err != nil { return nil, err } - if err := os.MkdirAll(filepath.Join(options.GraphRoot, options.GraphDriverName), 0700); err != nil { + if options.ImageStore != "" { + if err := os.MkdirAll(options.ImageStore, 0o700); err != nil { + return nil, err + } + } + if err := os.MkdirAll(filepath.Join(options.GraphRoot, options.GraphDriverName), 0o700); err != nil { return nil, err } + if options.ImageStore != "" { + if err := os.MkdirAll(filepath.Join(options.ImageStore, options.GraphDriverName), 0o700); err != nil { + return nil, err + } + } graphLock, err := lockfile.GetLockFile(filepath.Join(options.GraphRoot, "storage.lock")) if err != nil { @@ -785,6 +804,7 @@ func GetStore(options types.StoreOptions) (Store, error) { usernsLock: usernsLock, graphRoot: options.GraphRoot, graphOptions: options.GraphDriverOptions, + imageStoreDir: options.ImageStore, pullOptions: options.PullOptions, uidMap: copyIDMap(options.UIDMap), gidMap: copyIDMap(options.GIDMap), @@ -889,8 +909,12 @@ func (s *store) load() error { } driverPrefix := s.graphDriverName + "-" - gipath := filepath.Join(s.graphRoot, driverPrefix+"images") - if err := os.MkdirAll(gipath, 0700); err != nil { + imgStoreRoot := s.imageStoreDir + if imgStoreRoot == "" { + imgStoreRoot = s.graphRoot + } + gipath := filepath.Join(imgStoreRoot, driverPrefix+"images") + if err := os.MkdirAll(gipath, 0o700); err != nil { return err } ris, err := newImageStore(gipath) @@ -900,11 +924,11 @@ func (s *store) load() error { s.imageStore = ris gcpath := filepath.Join(s.graphRoot, driverPrefix+"containers") - if err := os.MkdirAll(gcpath, 0700); err != nil { + if err := os.MkdirAll(gcpath, 0o700); err != nil { return err } rcpath := filepath.Join(s.runRoot, driverPrefix+"containers") - if err := os.MkdirAll(rcpath, 0700); err != nil { + if err := os.MkdirAll(rcpath, 0o700); err != nil { return err } @@ -917,15 +941,28 @@ func (s *store) load() error { for _, store := range driver.AdditionalImageStores() { gipath := filepath.Join(store, driverPrefix+"images") - ris, err := newROImageStore(gipath) - if err != nil { - return err + var ris roImageStore + if s.imageStoreDir != "" && store == s.graphRoot { + // If --imagestore was set and current store + // is `graphRoot` then mount it as a `rw` additional + // store instead of `readonly` additional store. 
+ imageStore, err := newImageStore(gipath) + if err != nil { + return err + } + s.rwImageStores = append(s.rwImageStores, imageStore) + ris = imageStore + } else { + ris, err = newROImageStore(gipath) + if err != nil { + return err + } } s.roImageStores = append(s.roImageStores, ris) } s.digestLockRoot = filepath.Join(s.runRoot, driverPrefix+"locks") - if err := os.MkdirAll(s.digestLockRoot, 0700); err != nil { + if err := os.MkdirAll(s.digestLockRoot, 0o700); err != nil { return err } @@ -989,8 +1026,15 @@ func (s *store) stopUsingGraphDriver() { // Almost all users should use startUsingGraphDriver instead. // The caller must hold s.graphLock. func (s *store) createGraphDriverLocked() (drivers.Driver, error) { + driverRoot := s.imageStoreDir + imageStoreBase := s.graphRoot + if driverRoot == "" { + driverRoot = s.graphRoot + imageStoreBase = "" + } config := drivers.Options{ - Root: s.graphRoot, + Root: driverRoot, + ImageStore: imageStoreBase, RunRoot: s.runRoot, DriverPriority: s.graphDriverPriority, DriverOptions: s.graphOptions, @@ -1017,11 +1061,15 @@ func (s *store) getLayerStoreLocked() (rwLayerStore, error) { } driverPrefix := s.graphDriverName + "-" rlpath := filepath.Join(s.runRoot, driverPrefix+"layers") - if err := os.MkdirAll(rlpath, 0700); err != nil { + if err := os.MkdirAll(rlpath, 0o700); err != nil { return nil, err } - glpath := filepath.Join(s.graphRoot, driverPrefix+"layers") - if err := os.MkdirAll(glpath, 0700); err != nil { + imgStoreRoot := s.imageStoreDir + if imgStoreRoot == "" { + imgStoreRoot = s.graphRoot + } + glpath := filepath.Join(imgStoreRoot, driverPrefix+"layers") + if err := os.MkdirAll(glpath, 0o700); err != nil { return nil, err } rls, err := s.newLayerStore(rlpath, glpath, s.graphDriver, s.transientStore) @@ -1052,7 +1100,7 @@ func (s *store) getROLayerStoresLocked() ([]roLayerStore, error) { } driverPrefix := s.graphDriverName + "-" rlpath := filepath.Join(s.runRoot, driverPrefix+"layers") - if err := os.MkdirAll(rlpath, 0700); err != nil { + if err := os.MkdirAll(rlpath, 0o700); err != nil { return nil, err } for _, store := range s.graphDriver.AdditionalImageStores() { @@ -1081,7 +1129,7 @@ func (s *store) bothLayerStoreKindsLocked() (rwLayerStore, []roLayerStore, error } // bothLayerStoreKinds returns the primary, and additional read-only, layer store objects used by the store. -// It must be called with s.graphLock held. +// It must be called WITHOUT s.graphLock held. func (s *store) bothLayerStoreKinds() (rwLayerStore, []roLayerStore, error) { if err := s.startUsingGraphDriver(); err != nil { return nil, nil, err @@ -1115,56 +1163,107 @@ func (s *store) allLayerStores() ([]roLayerStore, error) { // readAllLayerStores processes allLayerStores() in order: // It locks the store for reading, checks for updates, and calls // -// (done, err) := fn(store) +// (data, done, err) := fn(store) // // until the callback returns done == true, and returns the data from the callback. // -// If reading any layer store fails, it immediately returns (true, err). +// If reading any layer store fails, it immediately returns ({}, true, err). // -// If all layer stores are processed without setting done == true, it returns (false, nil). +// If all layer stores are processed without setting done == true, it returns ({}, false, nil). 
// // Typical usage: // -// var res T = failureValue -// if done, err := s.readAllLayerStores(store, func(…) { +// if res, done, err := readAllLayerStores(s, func(…) { // … // }; done { // return res, err // } -func (s *store) readAllLayerStores(fn func(store roLayerStore) (bool, error)) (bool, error) { +func readAllLayerStores[T any](s *store, fn func(store roLayerStore) (T, bool, error)) (T, bool, error) { + var zeroRes T // A zero value of T + layerStores, err := s.allLayerStores() if err != nil { - return true, err + return zeroRes, true, err } for _, s := range layerStores { store := s if err := store.startReading(); err != nil { - return true, err + return zeroRes, true, err } defer store.stopReading() - if done, err := fn(store); done { - return true, err + if res, done, err := fn(store); done { + return res, true, err } } - return false, nil + return zeroRes, false, nil } // writeToLayerStore is a helper for working with store.getLayerStore(): // It locks the store for writing, checks for updates, and calls fn(). // It returns the return value of fn, or its own error initializing the store. -func (s *store) writeToLayerStore(fn func(store rwLayerStore) error) error { +func writeToLayerStore[T any](s *store, fn func(store rwLayerStore) (T, error)) (T, error) { + var zeroRes T // A zero value of T + store, err := s.getLayerStore() if err != nil { - return err + return zeroRes, err } if err := store.startWriting(); err != nil { - return err + return zeroRes, err } defer store.stopWriting() return fn(store) } +// readOrWriteAllLayerStores processes allLayerStores() in order: +// It locks the writeable store for writing and all others for reading, checks +// for updates, and calls +// +// (data, done, err) := fn(store) +// +// until the callback returns done == true, and returns the data from the callback. +// +// If reading or writing any layer store fails, it immediately returns ({}, true, err). +// +// If all layer stores are processed without setting done == true, it returns ({}, false, nil). +// +// Typical usage: +// +// if res, done, err := readOrWriteAllLayerStores(s, func(…) { +// … +// }; done { +// return res, err +// } +func readOrWriteAllLayerStores[T any](s *store, fn func(store roLayerStore) (T, bool, error)) (T, bool, error) { + var zeroRes T // A zero value of T + + rwLayerStore, roLayerStores, err := s.bothLayerStoreKinds() + if err != nil { + return zeroRes, true, err + } + + if err := rwLayerStore.startWriting(); err != nil { + return zeroRes, true, err + } + defer rwLayerStore.stopWriting() + if res, done, err := fn(rwLayerStore); done { + return res, true, err + } + + for _, s := range roLayerStores { + store := s + if err := store.startReading(); err != nil { + return zeroRes, true, err + } + defer store.stopReading() + if res, done, err := fn(store); done { + return res, true, err + } + } + return zeroRes, false, nil +} + // allImageStores returns a list of all image store objects used by the Store. // This is a convenience method for read-only users of the Store. func (s *store) allImageStores() []roImageStore { @@ -1174,53 +1273,69 @@ func (s *store) allImageStores() []roImageStore { // readAllImageStores processes allImageStores() in order: // It locks the store for reading, checks for updates, and calls // -// (done, err) := fn(store) +// (data, done, err) := fn(store) // // until the callback returns done == true, and returns the data from the callback. // -// If reading any Image store fails, it immediately returns (true, err).
+// If reading any Image store fails, it immediately returns ({}, true, err). // -// If all Image stores are processed without setting done == true, it returns (false, nil). +// If all Image stores are processed without setting done == true, it returns ({}, false, nil). // // Typical usage: // -// var res T = failureValue -// if done, err := s.readAllImageStores(store, func(…) { +// if res, done, err := readAllImageStores(s, func(…) { // … // }; done { // return res, err // } -func (s *store) readAllImageStores(fn func(store roImageStore) (bool, error)) (bool, error) { +func readAllImageStores[T any](s *store, fn func(store roImageStore) (T, bool, error)) (T, bool, error) { + var zeroRes T // A zero value of T + for _, s := range s.allImageStores() { store := s if err := store.startReading(); err != nil { - return true, err + return zeroRes, true, err } defer store.stopReading() - if done, err := fn(store); done { - return true, err + if res, done, err := fn(store); done { + return res, true, err } } - return false, nil + return zeroRes, false, nil } -// writeToImageStore is a convenience helper for working with store.getImageStore(): +// writeToImageStore is a convenience helper for working with store.imageStore: // It locks the store for writing, checks for updates, and calls fn(), which can then access store.imageStore. // It returns the return value of fn, or its own error initializing the store. -func (s *store) writeToImageStore(fn func() error) error { +func writeToImageStore[T any](s *store, fn func() (T, error)) (T, error) { if err := s.imageStore.startWriting(); err != nil { - return err + var zeroRes T // A zero value of T + return zeroRes, err } defer s.imageStore.stopWriting() return fn() } -// writeToContainerStore is a convenience helper for working with store.getContainerStore(): +// readContainerStore is a convenience helper for working with store.containerStore: +// It locks the store for reading, checks for updates, and calls fn(), which can then access store.containerStore. +// If reading the container store fails, it returns ({}, true, err). +// Returns the return value of fn on success. +func readContainerStore[T any](s *store, fn func() (T, bool, error)) (T, bool, error) { + if err := s.containerStore.startReading(); err != nil { + var zeroRes T // A zero value of T + return zeroRes, true, err + } + defer s.containerStore.stopReading() + return fn() +} + +// writeToContainerStore is a convenience helper for working with store.containerStore: // It locks the store for writing, checks for updates, and calls fn(), which can then access store.containerStore. // It returns the return value of fn, or its own error initializing the store. -func (s *store) writeToContainerStore(fn func() error) error { +func writeToContainerStore[T any](s *store, fn func() (T, error)) (T, error) { if err := s.containerStore.startWriting(); err != nil { - return err + var zeroRes T // A zero value of T + return zeroRes, err } defer s.containerStore.stopWriting() return fn() @@ -1252,10 +1367,13 @@ func (s *store) writeToAllStores(fn func(rlstore rwLayerStore) error) error { return fn(rlstore) } -// canUseShifting returns ??? -// store must be locked for writing. -func canUseShifting(store rwLayerStore, uidmap, gidmap []idtools.IDMap) bool { - if !store.supportsShifting() { +// canUseShifting returns true if we can use mount-time arguments (shifting) to +// avoid having to create a mapped top layer for a base image when we want to +// use it to create a container using ID mappings.
+// On entry: +// - rlstore must be locked for writing +func (s *store) canUseShifting(uidmap, gidmap []idtools.IDMap) bool { + if !s.graphDriver.SupportsShifting() { return false } if uidmap != nil && !idtools.IsContiguous(uidmap) { @@ -1342,7 +1460,7 @@ func (s *store) PutLayer(id, parent string, names []string, mountLabel string, w OriginalDigest: options.OriginalDigest, UncompressedDigest: options.UncompressedDigest, } - if canUseShifting(rlstore, uidMap, gidMap) { + if s.canUseShifting(uidMap, gidMap) { layerOptions.IDMappingOptions = types.IDMappingOptions{HostUIDMapping: true, HostGIDMapping: true, UIDMap: nil, GIDMap: nil} } else { layerOptions.IDMappingOptions = types.IDMappingOptions{ @@ -1384,94 +1502,93 @@ func (s *store) CreateImage(id string, names []string, layer, metadata string, i layer = ilayer.ID } - var options ImageOptions - var namesToAddAfterCreating []string - - if err := s.imageStore.startWriting(); err != nil { - return nil, err - } - defer s.imageStore.stopWriting() + return writeToImageStore(s, func() (*Image, error) { + var options ImageOptions + var namesToAddAfterCreating []string - // Check if the ID refers to an image in a read-only store -- we want - // to allow images in read-only stores to have their names changed, so - // if we find one, merge the new values in with what we know about the - // image that's already there. - if id != "" { - for _, is := range s.roImageStores { - store := is - if err := store.startReading(); err != nil { - return nil, err - } - defer store.stopReading() - if i, err := store.Get(id); err == nil { - // set information about this image in "options" - options = ImageOptions{ - Metadata: i.Metadata, - CreationDate: i.Created, - Digest: i.Digest, - Digests: copyDigestSlice(i.Digests), - NamesHistory: copyStringSlice(i.NamesHistory), + // Check if the ID refers to an image in a read-only store -- we want + // to allow images in read-only stores to have their names changed, so + // if we find one, merge the new values in with what we know about the + // image that's already there. 
+ if id != "" { + for _, is := range s.roImageStores { + store := is + if err := store.startReading(); err != nil { + return nil, err } - for _, key := range i.BigDataNames { - data, err := store.BigData(id, key) - if err != nil { - return nil, err + defer store.stopReading() + if i, err := store.Get(id); err == nil { + // set information about this image in "options" + options = ImageOptions{ + Metadata: i.Metadata, + CreationDate: i.Created, + Digest: i.Digest, + Digests: copyDigestSlice(i.Digests), + NamesHistory: copyStringSlice(i.NamesHistory), } - dataDigest, err := store.BigDataDigest(id, key) - if err != nil { - return nil, err + for _, key := range i.BigDataNames { + data, err := store.BigData(id, key) + if err != nil { + return nil, err + } + dataDigest, err := store.BigDataDigest(id, key) + if err != nil { + return nil, err + } + options.BigData = append(options.BigData, ImageBigDataOption{ + Key: key, + Data: data, + Digest: dataDigest, + }) } - options.BigData = append(options.BigData, ImageBigDataOption{ - Key: key, - Data: data, - Digest: dataDigest, - }) + namesToAddAfterCreating = dedupeStrings(append(append([]string{}, i.Names...), names...)) + break } - namesToAddAfterCreating = dedupeStrings(append(append([]string{}, i.Names...), names...)) - break } } - } - // merge any passed-in options into "options" as best we can - if iOptions != nil { - if !iOptions.CreationDate.IsZero() { - options.CreationDate = iOptions.CreationDate - } - if iOptions.Digest != "" { - options.Digest = iOptions.Digest - } - options.Digests = append(options.Digests, copyDigestSlice(iOptions.Digests)...) - if iOptions.Metadata != "" { - options.Metadata = iOptions.Metadata + // merge any passed-in options into "options" as best we can + if iOptions != nil { + if !iOptions.CreationDate.IsZero() { + options.CreationDate = iOptions.CreationDate + } + if iOptions.Digest != "" { + options.Digest = iOptions.Digest + } + options.Digests = append(options.Digests, copyDigestSlice(iOptions.Digests)...) + if iOptions.Metadata != "" { + options.Metadata = iOptions.Metadata + } + options.BigData = append(options.BigData, copyImageBigDataOptionSlice(iOptions.BigData)...) + options.NamesHistory = append(options.NamesHistory, copyStringSlice(iOptions.NamesHistory)...) + if options.Flags == nil { + options.Flags = make(map[string]interface{}) + } + for k, v := range iOptions.Flags { + options.Flags[k] = v + } } - options.BigData = append(options.BigData, copyImageBigDataOptionSlice(iOptions.BigData)...) - options.NamesHistory = append(options.NamesHistory, copyStringSlice(iOptions.NamesHistory)...) 
- if options.Flags == nil { - options.Flags = make(map[string]interface{}) + + if options.CreationDate.IsZero() { + options.CreationDate = time.Now().UTC() } - for k, v := range iOptions.Flags { - options.Flags[k] = v + if metadata != "" { + options.Metadata = metadata } - } - if options.CreationDate.IsZero() { - options.CreationDate = time.Now().UTC() - } - if metadata != "" { - options.Metadata = metadata - } - - res, err := s.imageStore.create(id, names, layer, options) - if err == nil && len(namesToAddAfterCreating) > 0 { - // set any names we pulled up from an additional image store, now that we won't be - // triggering a duplicate names error - err = s.imageStore.updateNames(res.ID, namesToAddAfterCreating, addNames) - } - return res, err + res, err := s.imageStore.create(id, names, layer, options) + if err == nil && len(namesToAddAfterCreating) > 0 { + // set any names we pulled up from an additional image store, now that we won't be + // triggering a duplicate names error + err = s.imageStore.updateNames(res.ID, namesToAddAfterCreating, addNames) + } + return res, err + }) } -// imageTopLayerForMapping does ??? +// imageTopLayerForMapping locates the layer that can take the place of the +// image's top layer as the shared parent layer for a one or more containers +// which are using ID mappings. // On entry: // - ristore must be locked EITHER for reading or writing // - s.imageStore must be locked for writing; it might be identical to ristore. @@ -1480,7 +1597,7 @@ func (s *store) CreateImage(id string, names []string, layer, metadata string, i func (s *store) imageTopLayerForMapping(image *Image, ristore roImageStore, rlstore rwLayerStore, lstores []roLayerStore, options types.IDMappingOptions) (*Layer, error) { layerMatchesMappingOptions := func(layer *Layer, options types.IDMappingOptions) bool { // If the driver supports shifting and the layer has no mappings, we can use it. - if canUseShifting(rlstore, options.UIDMap, options.GIDMap) && len(layer.UIDMap) == 0 && len(layer.GIDMap) == 0 { + if s.canUseShifting(options.UIDMap, options.GIDMap) && len(layer.UIDMap) == 0 && len(layer.GIDMap) == 0 { return true } // If we want host mapping, and the layer uses mappings, it's not the best match. @@ -1547,7 +1664,7 @@ func (s *store) imageTopLayerForMapping(image *Image, ristore roImageStore, rlst // that lets us edit image metadata, so create a duplicate of the layer with the desired // mappings, and register it as an alternate top layer in the image. var layerOptions LayerOptions - if canUseShifting(rlstore, options.UIDMap, options.GIDMap) { + if s.canUseShifting(options.UIDMap, options.GIDMap) { layerOptions.IDMappingOptions = types.IDMappingOptions{ HostUIDMapping: true, HostGIDMapping: true, @@ -1700,7 +1817,7 @@ func (s *store) CreateContainer(id string, names []string, image, layer, metadat // But in transient store mode, all container layers are volatile. 
Volatile: options.Volatile || s.transientStore, } - if canUseShifting(rlstore, uidMap, gidMap) { + if s.canUseShifting(uidMap, gidMap) { layerOptions.IDMappingOptions = types.IDMappingOptions{ HostUIDMapping: true, HostGIDMapping: true, @@ -1745,16 +1862,14 @@ func (s *store) CreateContainer(id string, names []string, image, layer, metadat options.Volatile = true } - var container *Container - err = s.writeToContainerStore(func() error { + return writeToContainerStore(s, func() (*Container, error) { options.IDMappingOptions = types.IDMappingOptions{ HostUIDMapping: len(options.UIDMap) == 0, HostGIDMapping: len(options.GIDMap) == 0, UIDMap: copyIDMap(options.UIDMap), GIDMap: copyIDMap(options.GIDMap), } - var err error - container, err = s.containerStore.create(id, names, imageID, layer, &options) + container, err := s.containerStore.create(id, names, imageID, layer, &options) if err != nil || container == nil { if err2 := rlstore.Delete(layer); err2 != nil { if err == nil { @@ -1764,9 +1879,8 @@ func (s *store) CreateContainer(id string, names []string, image, layer, metadat } } } - return err + return container, err }) - return container, err } func (s *store) SetMetadata(id, metadata string) error { @@ -1785,49 +1899,46 @@ func (s *store) SetMetadata(id, metadata string) error { } func (s *store) Metadata(id string) (string, error) { - var res string - - if done, err := s.readAllLayerStores(func(store roLayerStore) (bool, error) { + if res, done, err := readAllLayerStores(s, func(store roLayerStore) (string, bool, error) { if store.Exists(id) { - var err error - res, err = store.Metadata(id) - return true, err + res, err := store.Metadata(id) + return res, true, err } - return false, nil + return "", false, nil }); done { return res, err } - if done, err := s.readAllImageStores(func(store roImageStore) (bool, error) { + if res, done, err := readAllImageStores(s, func(store roImageStore) (string, bool, error) { if store.Exists(id) { - var err error - res, err = store.Metadata(id) - return true, err + res, err := store.Metadata(id) + return res, true, err } - return false, nil + return "", false, nil }); done { return res, err } - if err := s.containerStore.startReading(); err != nil { - return "", err - } - defer s.containerStore.stopReading() - if s.containerStore.Exists(id) { - return s.containerStore.Metadata(id) + if res, done, err := readContainerStore(s, func() (string, bool, error) { + if s.containerStore.Exists(id) { + res, err := s.containerStore.Metadata(id) + return res, true, err + } + return "", false, nil + }); done { + return res, err } + return "", ErrNotAnID } func (s *store) ListImageBigData(id string) ([]string, error) { - var res []string - if done, err := s.readAllImageStores(func(store roImageStore) (bool, error) { + if res, done, err := readAllImageStores(s, func(store roImageStore) ([]string, bool, error) { bigDataNames, err := store.BigDataNames(id) if err == nil { - res = bigDataNames - return true, nil + return bigDataNames, true, nil } - return false, nil + return nil, false, nil }); done { return res, err } @@ -1835,29 +1946,28 @@ func (s *store) ListImageBigData(id string) ([]string, error) { } func (s *store) ImageBigDataSize(id, key string) (int64, error) { - var res int64 = -1 - if done, err := s.readAllImageStores(func(store roImageStore) (bool, error) { + if res, done, err := readAllImageStores(s, func(store roImageStore) (int64, bool, error) { size, err := store.BigDataSize(id, key) if err == nil { - res = size - return true, nil + return size, true, 
nil } - return false, nil + return -1, false, nil }); done { - return res, err + if err != nil { + return -1, err + } + return res, nil } return -1, ErrSizeUnknown } func (s *store) ImageBigDataDigest(id, key string) (digest.Digest, error) { - var res digest.Digest - if done, err := s.readAllImageStores(func(ristore roImageStore) (bool, error) { + if res, done, err := readAllImageStores(s, func(ristore roImageStore) (digest.Digest, bool, error) { d, err := ristore.BigDataDigest(id, key) if err == nil && d.Validate() == nil { - res = d - return true, nil + return d, true, nil } - return false, nil + return "", false, nil }); done { return res, err } @@ -1866,17 +1976,15 @@ func (s *store) ImageBigData(id, key string) ([]byte, error) { foundImage := false - var res []byte - if done, err := s.readAllImageStores(func(store roImageStore) (bool, error) { + if res, done, err := readAllImageStores(s, func(store roImageStore) ([]byte, bool, error) { data, err := store.BigData(id, key) if err == nil { - res = data - return true, nil + return data, true, nil } if store.Exists(id) { foundImage = true } - return false, nil + return nil, false, nil }); done { return res, err } @@ -1890,17 +1998,15 @@ // named data associated with a layer. func (s *store) ListLayerBigData(id string) ([]string, error) { foundLayer := false - var res []string - if done, err := s.readAllLayerStores(func(store roLayerStore) (bool, error) { + if res, done, err := readAllLayerStores(s, func(store roLayerStore) ([]string, bool, error) { data, err := store.BigDataNames(id) if err == nil { - res = data - return true, nil + return data, true, nil } if store.Exists(id) { foundLayer = true } - return false, nil + return nil, false, nil }); done { return res, err } @@ -1914,17 +2020,15 @@ // associated with a layer. func (s *store) LayerBigData(id, key string) (io.ReadCloser, error) { foundLayer := false - var res io.ReadCloser - if done, err := s.readAllLayerStores(func(store roLayerStore) (bool, error) { + if res, done, err := readAllLayerStores(s, func(store roLayerStore) (io.ReadCloser, bool, error) { data, err := store.BigData(id, key) if err == nil { - res = data - return true, nil + return data, true, nil } if store.Exists(id) { foundLayer = true } - return false, nil + return nil, false, nil }); done { return res, err } @@ -1937,15 +2041,17 @@ func (s *store) LayerBigData(id, key string) (io.ReadCloser, error) { // SetLayerBigData stores a (possibly large) chunk of named data // associated with a layer.
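The hunks below show the calling convention this refactor establishes: the helpers are generic over the callback's result type, and callers that have nothing to return instantiate T as struct{} and discard the value. A self-contained sketch of the idiom, with hypothetical names (not the patch's own code):

package example

import "sync"

type guarded struct {
	mu sync.Mutex
	// ... protected state ...
}

// withLock locks g, runs fn, and propagates fn's value and error; the result
// type T is inferred at each call site.
func withLock[T any](g *guarded, fn func() (T, error)) (T, error) {
	g.mu.Lock()
	defer g.mu.Unlock()
	return fn()
}

// A caller that produces a value instantiates T normally:
func readCount(g *guarded) (int, error) {
	return withLock(g, func() (int, error) { return 42, nil })
}

// A caller with nothing to return instantiates T as struct{} and drops it,
// the same shape SetLayerBigData uses with writeToLayerStore below.
func touch(g *guarded) error {
	_, err := withLock(g, func() (struct{}, error) { return struct{}{}, nil })
	return err
}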
func (s *store) SetLayerBigData(id, key string, data io.Reader) error { - return s.writeToLayerStore(func(store rwLayerStore) error { - return store.SetBigData(id, key, data) + _, err := writeToLayerStore(s, func(store rwLayerStore) (struct{}, error) { + return struct{}{}, store.SetBigData(id, key, data) }) + return err } func (s *store) SetImageBigData(id, key string, data []byte, digestManifest func([]byte) (digest.Digest, error)) error { - return s.writeToImageStore(func() error { - return s.imageStore.SetBigData(id, key, data, digestManifest) + _, err := writeToImageStore(s, func() (struct{}, error) { + return struct{}{}, s.imageStore.SetBigData(id, key, data, digestManifest) }) + return err } func (s *store) ImageSize(id string) (int64, error) { @@ -2066,12 +2172,11 @@ func (s *store) ContainerSize(id string) (int64, error) { return -1, err } - var res int64 = -1 - err = s.writeToContainerStore(func() error { // Yes, s.containerStore.BigDataSize requires a write lock. + return writeToContainerStore(s, func() (int64, error) { // Yes, s.containerStore.BigDataSize requires a write lock. // Read the container record. container, err := s.containerStore.Get(id) if err != nil { - return err + return -1, err } // Read the container's layer's size. @@ -2081,24 +2186,24 @@ func (s *store) ContainerSize(id string) (int64, error) { if layer, err = store.Get(container.LayerID); err == nil { size, err = store.DiffSize("", layer.ID) if err != nil { - return fmt.Errorf("determining size of layer with ID %q: %w", layer.ID, err) + return -1, fmt.Errorf("determining size of layer with ID %q: %w", layer.ID, err) } break } } if layer == nil { - return fmt.Errorf("locating layer with ID %q: %w", container.LayerID, ErrLayerUnknown) + return -1, fmt.Errorf("locating layer with ID %q: %w", container.LayerID, ErrLayerUnknown) } // Count big data items. names, err := s.containerStore.BigDataNames(id) if err != nil { - return fmt.Errorf("reading list of big data items for container %q: %w", container.ID, err) + return -1, fmt.Errorf("reading list of big data items for container %q: %w", container.ID, err) } for _, name := range names { n, err := s.containerStore.BigDataSize(id, name) if err != nil { - return fmt.Errorf("reading size of big data item %q for container %q: %w", name, id, err) + return -1, fmt.Errorf("reading size of big data item %q for container %q: %w", name, id, err) } size += n } @@ -2106,92 +2211,88 @@ func (s *store) ContainerSize(id string) (int64, error) { // Count the size of our container directory and container run directory. n, err := directory.Size(cdir) if err != nil { - return err + return -1, err } size += n n, err = directory.Size(rdir) if err != nil { - return err + return -1, err } size += n - res = size - return nil + return size, nil }) - return res, err } func (s *store) ListContainerBigData(id string) ([]string, error) { - if err := s.containerStore.startReading(); err != nil { - return nil, err - } - defer s.containerStore.stopReading() - - return s.containerStore.BigDataNames(id) + res, _, err := readContainerStore(s, func() ([]string, bool, error) { + res, err := s.containerStore.BigDataNames(id) + return res, true, err + }) + return res, err } func (s *store) ContainerBigDataSize(id, key string) (int64, error) { - var res int64 = -1 - err := s.writeToContainerStore(func() error { // Yes, BigDataSize requires a write lock. 
- var err error - res, err = s.containerStore.BigDataSize(id, key) - return err + return writeToContainerStore(s, func() (int64, error) { // Yes, BigDataSize requires a write lock. + return s.containerStore.BigDataSize(id, key) }) - return res, err } func (s *store) ContainerBigDataDigest(id, key string) (digest.Digest, error) { - var res digest.Digest - err := s.writeToContainerStore(func() error { // Yes, BigDataDigest requires a write lock. - var err error - res, err = s.containerStore.BigDataDigest(id, key) - return err + return writeToContainerStore(s, func() (digest.Digest, error) { // Yes, BigDataDigest requires a write lock. + return s.containerStore.BigDataDigest(id, key) }) - return res, err } func (s *store) ContainerBigData(id, key string) ([]byte, error) { - if err := s.containerStore.startReading(); err != nil { - return nil, err - } - defer s.containerStore.stopReading() - return s.containerStore.BigData(id, key) + res, _, err := readContainerStore(s, func() ([]byte, bool, error) { + res, err := s.containerStore.BigData(id, key) + return res, true, err + }) + return res, err } func (s *store) SetContainerBigData(id, key string, data []byte) error { - return s.writeToContainerStore(func() error { - return s.containerStore.SetBigData(id, key, data) + _, err := writeToContainerStore(s, func() (struct{}, error) { + return struct{}{}, s.containerStore.SetBigData(id, key, data) }) + return err } func (s *store) Exists(id string) bool { - var res = false - - if done, _ := s.readAllLayerStores(func(store roLayerStore) (bool, error) { + found, _, err := readAllLayerStores(s, func(store roLayerStore) (bool, bool, error) { if store.Exists(id) { - res = true - return true, nil + return true, true, nil } - return false, nil - }); done { - return res + return false, false, nil + }) + if err != nil { + return false + } + if found { + return true } - if done, _ := s.readAllImageStores(func(store roImageStore) (bool, error) { + found, _, err = readAllImageStores(s, func(store roImageStore) (bool, bool, error) { if store.Exists(id) { - res = true - return true, nil + return true, true, nil } - return false, nil - }); done { - return res + return false, false, nil + }) + if err != nil { + return false + } + if found { + return true } - if err := s.containerStore.startReading(); err != nil { + found, _, err = readContainerStore(s, func() (bool, bool, error) { + return s.containerStore.Exists(id), true, nil + }) + if err != nil { return false } - defer s.containerStore.stopReading() - return s.containerStore.Exists(id) + return found } func dedupeStrings(names []string) []string { @@ -2234,14 +2335,12 @@ func (s *store) RemoveNames(id string, names []string) error { func (s *store) updateNames(id string, names []string, op updateNameOperation) error { deduped := dedupeStrings(names) - layerFound := false - if err := s.writeToLayerStore(func(rlstore rwLayerStore) error { + if found, err := writeToLayerStore(s, func(rlstore rwLayerStore) (bool, error) { if !rlstore.Exists(id) { - return nil + return false, nil } - layerFound = true - return rlstore.updateNames(id, deduped, op) - }); err != nil || layerFound { + return true, rlstore.updateNames(id, deduped, op) + }); err != nil || found { return err } @@ -2295,14 +2394,12 @@ func (s *store) updateNames(id string, names []string, op updateNameOperation) e } } - containerFound := false - if err := s.writeToContainerStore(func() error { + if found, err := writeToContainerStore(s, func() (bool, error) { if !s.containerStore.Exists(id) { - return nil + 
return false, nil } - containerFound = true - return s.containerStore.updateNames(id, deduped, op) - }); err != nil || containerFound { + return true, s.containerStore.updateNames(id, deduped, op) + }); err != nil || found { return err } @@ -2310,67 +2407,62 @@ func (s *store) updateNames(id string, names []string, op updateNameOperation) e } func (s *store) Names(id string) ([]string, error) { - var res []string - - if done, err := s.readAllLayerStores(func(store roLayerStore) (bool, error) { + if res, done, err := readAllLayerStores(s, func(store roLayerStore) ([]string, bool, error) { if l, err := store.Get(id); l != nil && err == nil { - res = l.Names - return true, nil + return l.Names, true, nil } - return false, nil + return nil, false, nil }); done { return res, err } - if done, err := s.readAllImageStores(func(store roImageStore) (bool, error) { + if res, done, err := readAllImageStores(s, func(store roImageStore) ([]string, bool, error) { if i, err := store.Get(id); i != nil && err == nil { - res = i.Names - return true, nil + return i.Names, true, nil } - return false, nil + return nil, false, nil }); done { return res, err } - if err := s.containerStore.startReading(); err != nil { - return nil, err - } - defer s.containerStore.stopReading() - if c, err := s.containerStore.Get(id); c != nil && err == nil { - return c.Names, nil + if res, done, err := readContainerStore(s, func() ([]string, bool, error) { + if c, err := s.containerStore.Get(id); c != nil && err == nil { + return c.Names, true, nil + } + return nil, false, nil + }); done { + return res, err } + return nil, ErrLayerUnknown } func (s *store) Lookup(name string) (string, error) { - var res string - - if done, err := s.readAllLayerStores(func(store roLayerStore) (bool, error) { + if res, done, err := readAllLayerStores(s, func(store roLayerStore) (string, bool, error) { if l, err := store.Get(name); l != nil && err == nil { - res = l.ID - return true, nil + return l.ID, true, nil } - return false, nil + return "", false, nil }); done { return res, err } - if done, err := s.readAllImageStores(func(store roImageStore) (bool, error) { + if res, done, err := readAllImageStores(s, func(store roImageStore) (string, bool, error) { if i, err := store.Get(name); i != nil && err == nil { - res = i.ID - return true, nil + return i.ID, true, nil } - return false, nil + return "", false, nil }); done { return res, err } - if err := s.containerStore.startReading(); err != nil { - return "", err - } - defer s.containerStore.stopReading() - if c, err := s.containerStore.Get(name); c != nil && err == nil { - return c.ID, nil + if res, done, err := readContainerStore(s, func() (string, bool, error) { + if c, err := s.containerStore.Get(name); c != nil && err == nil { + return c.ID, true, nil + } + return "", false, nil + }); done { + return res, err } return "", ErrLayerUnknown @@ -2430,8 +2522,22 @@ func (s *store) DeleteLayer(id string) error { func (s *store) DeleteImage(id string, commit bool) (layers []string, err error) { layersToRemove := []string{} if err := s.writeToAllStores(func(rlstore rwLayerStore) error { - if s.imageStore.Exists(id) { - image, err := s.imageStore.Get(id) + // Delete image from all available imagestores configured to be used. + imageFound := false + for _, is := range append([]rwImageStore{s.imageStore}, s.rwImageStores...) 
{
+		if is != s.imageStore {
+			// This is an additional writeable image store,
+			// so we must take its write lock explicitly.
+			if err := is.startWriting(); err != nil {
+				return err
+			}
+			defer is.stopWriting()
+		}
+		if !is.Exists(id) {
+			continue
+		}
+		imageFound = true
+		image, err := is.Get(id)
 		if err != nil {
 			return err
 		}
@@ -2447,7 +2553,7 @@ func (s *store) DeleteImage(id string, commit bool) (layers []string, err error)
 			if container, ok := aContainerByImage[id]; ok {
 				return fmt.Errorf("image used by %v: %w", container, ErrImageUsedByContainer)
 			}
-			images, err := s.imageStore.Images()
+			images, err := is.Images()
 			if err != nil {
 				return err
 			}
@@ -2469,7 +2575,7 @@ func (s *store) DeleteImage(id string, commit bool) (layers []string, err error)
 			}
 		}
 		if commit {
-			if err = s.imageStore.Delete(id); err != nil {
+			if err = is.Delete(id); err != nil {
 				return err
 			}
 		}
@@ -2514,7 +2620,8 @@ func (s *store) DeleteImage(id string, commit bool) (layers []string, err error)
 				layersToRemoveMap[layer] = struct{}{}
 				layer = parent
 			}
-		} else {
+		}
+		if !imageFound {
 			return ErrNotAnImage
 		}
 		if commit {
@@ -2542,63 +2649,45 @@ func (s *store) DeleteContainer(id string) error {
 			return ErrNotAContainer
 		}

-		errChan := make(chan error)
-		var wg sync.WaitGroup
-
+		// delete the layer first, separately, so that if we get an
+		// error while trying to do so, we don't go ahead and delete
+		// the container record that refers to it, effectively losing
+		// track of it
 		if rlstore.Exists(container.LayerID) {
-			wg.Add(1)
-			go func() {
-				errChan <- rlstore.Delete(container.LayerID)
-				wg.Done()
-			}()
-		}
-		wg.Add(1)
-		go func() {
-			errChan <- s.containerStore.Delete(id)
-			wg.Done()
-		}()
+			if err := rlstore.Delete(container.LayerID); err != nil {
+				return err
+			}
+		}
+
+		var wg multierror.Group
+		wg.Go(func() error { return s.containerStore.Delete(id) })

 		middleDir := s.graphDriverName + "-containers"
-		gcpath := filepath.Join(s.GraphRoot(), middleDir, container.ID)
-		wg.Add(1)
-		go func() {
-			defer wg.Done()
+
+		wg.Go(func() error {
+			gcpath := filepath.Join(s.GraphRoot(), middleDir, container.ID)
 			// attempt a simple rm -rf first
-			err := os.RemoveAll(gcpath)
-			if err == nil {
-				errChan <- nil
-				return
+			if err := os.RemoveAll(gcpath); err == nil {
+				return nil
 			}
 			// and if it fails get to the more complicated cleanup
-			errChan <- system.EnsureRemoveAll(gcpath)
-		}()
+			return system.EnsureRemoveAll(gcpath)
+		})

-		rcpath := filepath.Join(s.RunRoot(), middleDir, container.ID)
-		wg.Add(1)
-		go func() {
-			defer wg.Done()
+		wg.Go(func() error {
+			rcpath := filepath.Join(s.RunRoot(), middleDir, container.ID)
 			// attempt a simple rm -rf first
-			err := os.RemoveAll(rcpath)
-			if err == nil {
-				errChan <- nil
-				return
+			if err := os.RemoveAll(rcpath); err == nil {
+				return nil
 			}
 			// and if it fails get to the more complicated cleanup
-			errChan <- system.EnsureRemoveAll(rcpath)
-		}()
-
-		go func() {
-			wg.Wait()
-			close(errChan)
-		}()
+			return system.EnsureRemoveAll(rcpath)
+		})

-		var errors []error
-		for err := range errChan {
-			if err != nil {
-				errors = append(errors, err)
-			}
+		if multierr := wg.Wait(); multierr != nil {
+			return multierr.ErrorOrNil()
 		}
-		return multierror.Append(nil, errors...).ErrorOrNil()
+		return nil
 	})
 }
@@ -2679,7 +2768,7 @@ func (s *store) mount(id string, options drivers.MountOpts) (string, error) {
 	defer rlstore.stopWriting()

 	if options.UidMaps != nil || options.GidMaps != nil {
-		options.DisableShifting = !canUseShifting(rlstore, options.UidMaps, options.GidMaps)
+		options.DisableShifting =
!s.canUseShifting(options.UidMaps, options.GidMaps) } if rlstore.Exists(id) { @@ -2756,27 +2845,21 @@ func (s *store) Unmount(id string, force bool) (bool, error) { if layerID, err := s.ContainerLayerID(id); err == nil { id = layerID } - var res bool - err := s.writeToLayerStore(func(rlstore rwLayerStore) error { + return writeToLayerStore(s, func(rlstore rwLayerStore) (bool, error) { if rlstore.Exists(id) { - var err error - res, err = rlstore.unmount(id, force, false) - return err + return rlstore.unmount(id, force, false) } - return ErrLayerUnknown + return false, ErrLayerUnknown }) - return res, err } func (s *store) Changes(from, to string) ([]archive.Change, error) { - var res []archive.Change - if done, err := s.readAllLayerStores(func(store roLayerStore) (bool, error) { + if res, done, err := readAllLayerStores(s, func(store roLayerStore) ([]archive.Change, bool, error) { if store.Exists(to) { - var err error - res, err = store.Changes(from, to) - return true, err + res, err := store.Changes(from, to) + return res, true, err } - return false, nil + return nil, false, nil }); done { return res, err } @@ -2784,16 +2867,17 @@ func (s *store) Changes(from, to string) ([]archive.Change, error) { } func (s *store) DiffSize(from, to string) (int64, error) { - var res int64 = -1 - if done, err := s.readAllLayerStores(func(store roLayerStore) (bool, error) { + if res, done, err := readAllLayerStores(s, func(store roLayerStore) (int64, bool, error) { if store.Exists(to) { - var err error - res, err = store.DiffSize(from, to) - return true, err + res, err := store.DiffSize(from, to) + return res, true, err } - return false, nil + return -1, false, nil }); done { - return res, err + if err != nil { + return -1, err + } + return res, nil } return -1, ErrLayerUnknown } @@ -2837,71 +2921,61 @@ func (s *store) Diff(from, to string, options *DiffOptions) (io.ReadCloser, erro } func (s *store) ApplyDiffFromStagingDirectory(to, stagingDirectory string, diffOutput *drivers.DriverWithDifferOutput, options *drivers.ApplyDiffOpts) error { - return s.writeToLayerStore(func(rlstore rwLayerStore) error { + _, err := writeToLayerStore(s, func(rlstore rwLayerStore) (struct{}, error) { if !rlstore.Exists(to) { - return ErrLayerUnknown + return struct{}{}, ErrLayerUnknown } - return rlstore.ApplyDiffFromStagingDirectory(to, stagingDirectory, diffOutput, options) + return struct{}{}, rlstore.ApplyDiffFromStagingDirectory(to, stagingDirectory, diffOutput, options) }) + return err } func (s *store) CleanupStagingDirectory(stagingDirectory string) error { - return s.writeToLayerStore(func(rlstore rwLayerStore) error { - return rlstore.CleanupStagingDirectory(stagingDirectory) + _, err := writeToLayerStore(s, func(rlstore rwLayerStore) (struct{}, error) { + return struct{}{}, rlstore.CleanupStagingDirectory(stagingDirectory) }) + return err } func (s *store) ApplyDiffWithDiffer(to string, options *drivers.ApplyDiffOpts, differ drivers.Differ) (*drivers.DriverWithDifferOutput, error) { - var res *drivers.DriverWithDifferOutput - err := s.writeToLayerStore(func(rlstore rwLayerStore) error { + return writeToLayerStore(s, func(rlstore rwLayerStore) (*drivers.DriverWithDifferOutput, error) { if to != "" && !rlstore.Exists(to) { - return ErrLayerUnknown + return nil, ErrLayerUnknown } - var err error - res, err = rlstore.ApplyDiffWithDiffer(to, options, differ) - return err + return rlstore.ApplyDiffWithDiffer(to, options, differ) }) - return res, err } func (s *store) DifferTarget(id string) (string, error) { - var res 
string - err := s.writeToLayerStore(func(rlstore rwLayerStore) error { + return writeToLayerStore(s, func(rlstore rwLayerStore) (string, error) { if rlstore.Exists(id) { - var err error - res, err = rlstore.DifferTarget(id) - return err + return rlstore.DifferTarget(id) } - return ErrLayerUnknown + return "", ErrLayerUnknown }) - return res, err } func (s *store) ApplyDiff(to string, diff io.Reader) (int64, error) { - var res int64 = -1 - err := s.writeToLayerStore(func(rlstore rwLayerStore) error { + return writeToLayerStore(s, func(rlstore rwLayerStore) (int64, error) { if rlstore.Exists(to) { - var err error - res, err = rlstore.ApplyDiff(to, diff) - return err + return rlstore.ApplyDiff(to, diff) } - return ErrLayerUnknown + return -1, ErrLayerUnknown }) - return res, err } func (s *store) layersByMappedDigest(m func(roLayerStore, digest.Digest) ([]Layer, error), d digest.Digest) ([]Layer, error) { var layers []Layer - if _, err := s.readAllLayerStores(func(store roLayerStore) (bool, error) { + if _, _, err := readAllLayerStores(s, func(store roLayerStore) (struct{}, bool, error) { storeLayers, err := m(store, d) if err != nil { if !errors.Is(err, ErrLayerUnknown) { - return true, err + return struct{}{}, true, err } - return false, nil + return struct{}{}, false, nil } layers = append(layers, storeLayers...) - return false, nil + return struct{}{}, false, nil }); err != nil { return nil, err } @@ -2926,16 +3000,17 @@ func (s *store) LayersByUncompressedDigest(d digest.Digest) ([]Layer, error) { } func (s *store) LayerSize(id string) (int64, error) { - var res int64 = -1 - if done, err := s.readAllLayerStores(func(store roLayerStore) (bool, error) { + if res, done, err := readAllLayerStores(s, func(store roLayerStore) (int64, bool, error) { if store.Exists(id) { - var err error - res, err = store.Size(id) - return true, err + res, err := store.Size(id) + return res, true, err } - return false, nil + return -1, false, nil }); done { - return res, err + if err != nil { + return -1, err + } + return res, nil } return -1, ErrLayerUnknown } @@ -2980,13 +3055,13 @@ func (s *store) ContainerParentOwners(id string) ([]int, []int, error) { func (s *store) Layers() ([]Layer, error) { var layers []Layer - if done, err := s.readAllLayerStores(func(store roLayerStore) (bool, error) { + if _, done, err := readAllLayerStores(s, func(store roLayerStore) (struct{}, bool, error) { storeLayers, err := store.Layers() if err != nil { - return true, err + return struct{}{}, true, err } layers = append(layers, storeLayers...) - return false, nil + return struct{}{}, false, nil }); done { return nil, err } @@ -2995,13 +3070,13 @@ func (s *store) Layers() ([]Layer, error) { func (s *store) Images() ([]Image, error) { var images []Image - if _, err := s.readAllImageStores(func(store roImageStore) (bool, error) { + if _, _, err := readAllImageStores(s, func(store roImageStore) (struct{}, bool, error) { storeImages, err := store.Images() if err != nil { - return true, err + return struct{}{}, true, err } images = append(images, storeImages...) 
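[Editor's note] The hunks above and below all apply one mechanical refactor: the store-traversal helpers (readAllLayerStores, readAllImageStores, readContainerStore, writeToLayerStore, and friends) become generic over the result type, so call sites return values directly instead of mutating a variable captured by the closure. This is also why the vendored module now requires go 1.19 (see modules.txt at the end of this patch). A minimal, self-contained sketch of the shape, using a toy store type rather than the real roLayerStore/roImageStore interfaces, and omitting the locking the real helpers perform:

    package main

    import "fmt"

    // toyStore stands in for the real read-only store interfaces.
    type toyStore struct{ names map[string][]string }

    // readAllStores mirrors the (result, done, err) shape of the new
    // readAllImageStores/readAllLayerStores helpers: fn is tried against
    // each store, and iteration stops at the first store that reports
    // done or fails, returning fn's typed result directly.
    func readAllStores[T any](stores []toyStore, fn func(toyStore) (T, bool, error)) (T, bool, error) {
        var zero T
        for _, st := range stores {
            res, done, err := fn(st)
            if done || err != nil {
                return res, true, err
            }
        }
        return zero, false, nil
    }

    func main() {
        stores := []toyStore{
            {names: map[string][]string{}},
            {names: map[string][]string{"img1": {"alpine:latest"}}},
        }
        res, done, err := readAllStores(stores, func(st toyStore) ([]string, bool, error) {
            if n, ok := st.names["img1"]; ok {
                return n, true, nil
            }
            return nil, false, nil
        })
        fmt.Println(res, done, err) // [alpine:latest] true <nil>
    }

The (result, done, error) triple keeps the early-exit semantics of the old boolean return while letting the type system, not a captured variable, carry the result.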
- return false, nil + return struct{}{}, false, nil }); err != nil { return nil, err } @@ -3009,23 +3084,20 @@ func (s *store) Images() ([]Image, error) { } func (s *store) Containers() ([]Container, error) { - if err := s.containerStore.startReading(); err != nil { - return nil, err - } - defer s.containerStore.stopReading() - - return s.containerStore.Containers() + res, _, err := readContainerStore(s, func() ([]Container, bool, error) { + res, err := s.containerStore.Containers() + return res, true, err + }) + return res, err } func (s *store) Layer(id string) (*Layer, error) { - var res *Layer - if done, err := s.readAllLayerStores(func(store roLayerStore) (bool, error) { + if res, done, err := readAllLayerStores(s, func(store roLayerStore) (*Layer, bool, error) { layer, err := store.Get(id) if err == nil { - res = layer - return true, nil + return layer, true, nil } - return false, nil + return nil, false, nil }); done { return res, err } @@ -3119,8 +3191,7 @@ func (al *additionalLayer) Release() { } func (s *store) Image(id string) (*Image, error) { - var res *Image - if done, err := s.readAllImageStores(func(store roImageStore) (bool, error) { + if res, done, err := readAllImageStores(s, func(store roImageStore) (*Image, bool, error) { image, err := store.Get(id) if err == nil { if store != s.imageStore { @@ -3130,13 +3201,12 @@ func (s *store) Image(id string) (*Image, error) { // store, but we have an entry with the same ID in the read-write store, // then the name was removed when we duplicated the image's // record into writable storage, so we should ignore this entry - return false, nil + return nil, false, nil } } - res = image - return true, nil + return image, true, nil } - return false, nil + return nil, false, nil }); done { return res, err } @@ -3150,10 +3220,10 @@ func (s *store) ImagesByTopLayer(id string) ([]*Image, error) { } images := []*Image{} - if _, err := s.readAllImageStores(func(store roImageStore) (bool, error) { + if _, _, err := readAllImageStores(s, func(store roImageStore) (struct{}, bool, error) { imageList, err := store.Images() if err != nil { - return true, err + return struct{}{}, true, err } for _, image := range imageList { image := image @@ -3161,7 +3231,7 @@ func (s *store) ImagesByTopLayer(id string) ([]*Image, error) { images = append(images, &image) } } - return false, nil + return struct{}{}, false, nil }); err != nil { return nil, err } @@ -3170,13 +3240,13 @@ func (s *store) ImagesByTopLayer(id string) ([]*Image, error) { func (s *store) ImagesByDigest(d digest.Digest) ([]*Image, error) { images := []*Image{} - if _, err := s.readAllImageStores(func(store roImageStore) (bool, error) { + if _, _, err := readAllImageStores(s, func(store roImageStore) (struct{}, bool, error) { imageList, err := store.ByDigest(d) if err != nil && !errors.Is(err, ErrImageUnknown) { - return true, err + return struct{}{}, true, err } images = append(images, imageList...) 
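[Editor's note] Several converted call sites have no meaningful result at all (SetContainerBigData, ApplyDiffFromStagingDirectory, GarbageCollect), so they instantiate the same generic helpers with struct{} as a unit type and discard the value. A hedged, self-contained illustration of that idiom, with a plain mutex standing in for the store's startWriting/stopWriting protocol:

    package main

    import (
        "errors"
        "fmt"
        "sync"
    )

    // withLock plays the role of writeToContainerStore/writeToLayerStore:
    // it brackets fn between lock and unlock and forwards fn's typed result.
    func withLock[T any](mu *sync.Mutex, fn func() (T, error)) (T, error) {
        mu.Lock()
        defer mu.Unlock()
        return fn()
    }

    func main() {
        var mu sync.Mutex

        // A call that produces a value, as ContainerBigDataSize does above.
        size, err := withLock(&mu, func() (int64, error) { return 1024, nil })
        fmt.Println(size, err)

        // A call that only produces an error, as SetContainerBigData does:
        // struct{} is the unit type, and the blank identifier drops it.
        _, err = withLock(&mu, func() (struct{}, error) {
            return struct{}{}, errors.New("set big data: read-only store")
        })
        fmt.Println(err)
    }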
- return false, nil + return struct{}{}, false, nil }); err != nil { return nil, err } @@ -3184,20 +3254,18 @@ func (s *store) ImagesByDigest(d digest.Digest) ([]*Image, error) { } func (s *store) Container(id string) (*Container, error) { - if err := s.containerStore.startReading(); err != nil { - return nil, err - } - defer s.containerStore.stopReading() - - return s.containerStore.Get(id) + res, _, err := readContainerStore(s, func() (*Container, bool, error) { + res, err := s.containerStore.Get(id) + return res, true, err + }) + return res, err } func (s *store) ContainerLayerID(id string) (string, error) { - if err := s.containerStore.startReading(); err != nil { - return "", err - } - defer s.containerStore.stopReading() - container, err := s.containerStore.Get(id) + container, _, err := readContainerStore(s, func() (*Container, bool, error) { + res, err := s.containerStore.Get(id) + return res, true, err + }) if err != nil { return "", err } @@ -3209,11 +3277,10 @@ func (s *store) ContainerByLayer(id string) (*Container, error) { if err != nil { return nil, err } - if err := s.containerStore.startReading(); err != nil { - return nil, err - } - defer s.containerStore.stopReading() - containerList, err := s.containerStore.Containers() + containerList, _, err := readContainerStore(s, func() ([]Container, bool, error) { + res, err := s.containerStore.Containers() + return res, true, err + }) if err != nil { return nil, err } @@ -3227,41 +3294,37 @@ func (s *store) ContainerByLayer(id string) (*Container, error) { } func (s *store) ContainerDirectory(id string) (string, error) { - if err := s.containerStore.startReading(); err != nil { - return "", err - } - defer s.containerStore.stopReading() - - id, err := s.containerStore.Lookup(id) - if err != nil { - return "", err - } + res, _, err := readContainerStore(s, func() (string, bool, error) { + id, err := s.containerStore.Lookup(id) + if err != nil { + return "", true, err + } - middleDir := s.graphDriverName + "-containers" - gcpath := filepath.Join(s.GraphRoot(), middleDir, id, "userdata") - if err := os.MkdirAll(gcpath, 0700); err != nil { - return "", err - } - return gcpath, nil + middleDir := s.graphDriverName + "-containers" + gcpath := filepath.Join(s.GraphRoot(), middleDir, id, "userdata") + if err := os.MkdirAll(gcpath, 0o700); err != nil { + return "", true, err + } + return gcpath, true, nil + }) + return res, err } func (s *store) ContainerRunDirectory(id string) (string, error) { - if err := s.containerStore.startReading(); err != nil { - return "", err - } - defer s.containerStore.stopReading() - - id, err := s.containerStore.Lookup(id) - if err != nil { - return "", err - } + res, _, err := readContainerStore(s, func() (string, bool, error) { + id, err := s.containerStore.Lookup(id) + if err != nil { + return "", true, err + } - middleDir := s.graphDriverName + "-containers" - rcpath := filepath.Join(s.RunRoot(), middleDir, id, "userdata") - if err := os.MkdirAll(rcpath, 0700); err != nil { - return "", err - } - return rcpath, nil + middleDir := s.graphDriverName + "-containers" + rcpath := filepath.Join(s.RunRoot(), middleDir, id, "userdata") + if err := os.MkdirAll(rcpath, 0o700); err != nil { + return "", true, err + } + return rcpath, true, nil + }) + return res, err } func (s *store) SetContainerDirectoryFile(id, file string, data []byte) error { @@ -3269,11 +3332,11 @@ func (s *store) SetContainerDirectoryFile(id, file string, data []byte) error { if err != nil { return err } - err = 
os.MkdirAll(filepath.Dir(filepath.Join(dir, file)), 0700) + err = os.MkdirAll(filepath.Dir(filepath.Join(dir, file)), 0o700) if err != nil { return err } - return ioutils.AtomicWriteFile(filepath.Join(dir, file), data, 0600) + return ioutils.AtomicWriteFile(filepath.Join(dir, file), data, 0o600) } func (s *store) FromContainerDirectory(id, file string) ([]byte, error) { @@ -3289,11 +3352,11 @@ func (s *store) SetContainerRunDirectoryFile(id, file string, data []byte) error if err != nil { return err } - err = os.MkdirAll(filepath.Dir(filepath.Join(dir, file)), 0700) + err = os.MkdirAll(filepath.Dir(filepath.Join(dir, file)), 0o700) if err != nil { return err } - return ioutils.AtomicWriteFile(filepath.Join(dir, file), data, 0600) + return ioutils.AtomicWriteFile(filepath.Join(dir, file), data, 0o600) } func (s *store) FromContainerRunDirectory(id, file string) ([]byte, error) { @@ -3541,19 +3604,19 @@ func (s *store) Free() { // Tries to clean up old unreferenced container leftovers. returns the first error // but continues as far as it can func (s *store) GarbageCollect() error { - firstErr := s.writeToContainerStore(func() error { - return s.containerStore.GarbageCollect() + _, firstErr := writeToContainerStore(s, func() (struct{}, error) { + return struct{}{}, s.containerStore.GarbageCollect() }) - moreErr := s.writeToImageStore(func() error { - return s.imageStore.GarbageCollect() + _, moreErr := writeToImageStore(s, func() (struct{}, error) { + return struct{}{}, s.imageStore.GarbageCollect() }) if firstErr == nil { firstErr = moreErr } - moreErr = s.writeToLayerStore(func(rlstore rwLayerStore) error { - return rlstore.GarbageCollect() + _, moreErr = writeToLayerStore(s, func(rlstore rwLayerStore) (struct{}, error) { + return struct{}{}, rlstore.GarbageCollect() }) if firstErr == nil { firstErr = moreErr diff --git a/vendor/github.com/containers/storage/types/errors.go b/vendor/github.com/containers/storage/types/errors.go index dc6ee3e0c..845b14eed 100644 --- a/vendor/github.com/containers/storage/types/errors.go +++ b/vendor/github.com/containers/storage/types/errors.go @@ -59,4 +59,41 @@ var ( ErrInvalidMappings = errors.New("invalid mappings specified") // ErrNoAvailableIDs is returned when there are not enough unused IDS within the user namespace. ErrNoAvailableIDs = errors.New("not enough unused IDs in user namespace") + + // ErrLayerUnaccounted describes a layer that is present in the lower-level storage driver, + // but which is not known to or managed by the higher-level driver-agnostic logic. + ErrLayerUnaccounted = errors.New("layer in lower level storage driver not accounted for") + // ErrLayerUnreferenced describes a layer which is not used by any image or container. + ErrLayerUnreferenced = errors.New("layer not referenced by any images or containers") + // ErrLayerIncorrectContentDigest describes a layer for which the contents of one or more + // files which were added in the layer appear to have changed. It may instead look like an + // unnamed "file integrity checksum failed" error. + ErrLayerIncorrectContentDigest = errors.New("layer content incorrect digest") + // ErrLayerIncorrectContentSize describes a layer for which regenerating the diff that was + // used to populate the layer produced a diff of a different size. We check the digest + // first, so it's highly unlikely you'll ever see this error. 
+	ErrLayerIncorrectContentSize = errors.New("layer content incorrect size")
+	// ErrLayerContentModified describes a layer which contains contents which should not be
+	// there, or for which ownership/permissions/dates have been changed.
+	ErrLayerContentModified = errors.New("layer content modified")
+	// ErrLayerDataMissing describes a layer which is missing a big data item.
+	ErrLayerDataMissing = errors.New("layer data item is missing")
+	// ErrLayerMissing describes a layer which is the missing parent of a layer.
+	ErrLayerMissing = errors.New("layer is missing")
+	// ErrImageLayerMissing describes an image which claims to have a layer that we don't know
+	// about.
+	ErrImageLayerMissing = errors.New("image layer is missing")
+	// ErrImageDataMissing describes an image which is missing a big data item.
+	ErrImageDataMissing = errors.New("image data item is missing")
+	// ErrImageDataIncorrectSize describes an image which has a big data item which looks like
+	// its size has changed, likely because it's been modified somehow.
+	ErrImageDataIncorrectSize = errors.New("image data item has incorrect size")
+	// ErrContainerImageMissing describes a container which claims to be based on an image that
+	// we don't know about.
+	ErrContainerImageMissing = errors.New("image missing")
+	// ErrContainerDataMissing describes a container which is missing a big data item.
+	ErrContainerDataMissing = errors.New("container data item is missing")
+	// ErrContainerDataIncorrectSize describes a container which has a big data item which looks
+	// like its size has changed, likely because it's been modified somehow.
+	ErrContainerDataIncorrectSize = errors.New("container data item has incorrect size")
 )
diff --git a/vendor/github.com/containers/storage/types/options.go b/vendor/github.com/containers/storage/types/options.go
index 7189a8e6a..3ff00ac64 100644
--- a/vendor/github.com/containers/storage/types/options.go
+++ b/vendor/github.com/containers/storage/types/options.go
@@ -21,6 +21,7 @@ type TomlConfig struct {
 		Driver              string   `toml:"driver,omitempty"`
 		DriverPriority      []string `toml:"driver_priority,omitempty"`
 		RunRoot             string   `toml:"runroot,omitempty"`
+		ImageStore          string   `toml:"imagestore,omitempty"`
 		GraphRoot           string   `toml:"graphroot,omitempty"`
 		RootlessStoragePath string   `toml:"rootless_storage_path,omitempty"`
 		TransientStore      bool     `toml:"transient_store,omitempty"`
@@ -215,6 +216,10 @@ type StoreOptions struct {
 	// GraphRoot is the filesystem path under which we will store the
 	// contents of layers, images, and containers.
 	GraphRoot string `json:"root,omitempty"`
+	// ImageStore is the location of the image store, which is separated from
+	// the container store. This is generally not recommended unless users
+	// want separate stores for images and containers.
+	ImageStore string `json:"imagestore,omitempty"`
 	// RootlessStoragePath is the storage path for rootless users
 	// default $HOME/.local/share/containers/storage
 	RootlessStoragePath string `toml:"rootless_storage_path"`
@@ -295,6 +300,7 @@ func getRootlessStorageOpts(rootlessUID int, systemOpts StoreOptions) (StoreOpti
 	// present.
if defaultConfigFileSet { opts.GraphDriverOptions = systemOpts.GraphDriverOptions + opts.ImageStore = systemOpts.ImageStore } else if opts.GraphDriverName == overlayDriver { for _, o := range systemOpts.GraphDriverOptions { if strings.Contains(o, "ignore_chown_errors") { @@ -305,7 +311,23 @@ func getRootlessStorageOpts(rootlessUID int, systemOpts StoreOptions) (StoreOpti } if opts.GraphDriverName == "" { if len(systemOpts.GraphDriverPriority) == 0 { - opts.GraphDriverName = "vfs" + dirEntries, err := os.ReadDir(opts.GraphRoot) + if err == nil { + for _, entry := range dirEntries { + if strings.HasSuffix(entry.Name(), "-images") { + opts.GraphDriverName = strings.TrimSuffix(entry.Name(), "-images") + break + } + } + } + + if opts.GraphDriverName == "" { + if canUseRootlessOverlay(opts.GraphRoot, opts.RunRoot) { + opts.GraphDriverName = overlayDriver + } else { + opts.GraphDriverName = "vfs" + } + } } else { opts.GraphDriverPriority = systemOpts.GraphDriverPriority } @@ -405,6 +427,9 @@ func ReloadConfigurationFile(configFile string, storeOptions *StoreOptions) erro if config.Storage.GraphRoot != "" { storeOptions.GraphRoot = config.Storage.GraphRoot } + if config.Storage.ImageStore != "" { + storeOptions.ImageStore = config.Storage.ImageStore + } if config.Storage.RootlessStoragePath != "" { storeOptions.RootlessStoragePath = config.Storage.RootlessStoragePath } @@ -432,6 +457,16 @@ func ReloadConfigurationFile(configFile string, storeOptions *StoreOptions) erro if config.Storage.Options.MountOpt != "" { storeOptions.GraphDriverOptions = append(storeOptions.GraphDriverOptions, fmt.Sprintf("%s.mountopt=%s", config.Storage.Driver, config.Storage.Options.MountOpt)) } + + uidmap, err := idtools.ParseIDMap([]string{config.Storage.Options.RemapUIDs}, "remap-uids") + if err != nil { + return err + } + gidmap, err := idtools.ParseIDMap([]string{config.Storage.Options.RemapGIDs}, "remap-gids") + if err != nil { + return err + } + if config.Storage.Options.RemapUser != "" && config.Storage.Options.RemapGroup == "" { config.Storage.Options.RemapGroup = config.Storage.Options.RemapUser } @@ -444,19 +479,9 @@ func ReloadConfigurationFile(configFile string, storeOptions *StoreOptions) erro logrus.Warningf("Error initializing ID mappings for %s:%s %v\n", config.Storage.Options.RemapUser, config.Storage.Options.RemapGroup, err) return err } - storeOptions.UIDMap = mappings.UIDs() - storeOptions.GIDMap = mappings.GIDs() + uidmap = mappings.UIDs() + gidmap = mappings.GIDs() } - - uidmap, err := idtools.ParseIDMap([]string{config.Storage.Options.RemapUIDs}, "remap-uids") - if err != nil { - return err - } - gidmap, err := idtools.ParseIDMap([]string{config.Storage.Options.RemapGIDs}, "remap-gids") - if err != nil { - return err - } - storeOptions.UIDMap = uidmap storeOptions.GIDMap = gidmap storeOptions.RootAutoNsUser = config.Storage.Options.RootAutoUsernsUser diff --git a/vendor/github.com/containers/storage/types/options_darwin.go b/vendor/github.com/containers/storage/types/options_darwin.go index eed1a3d94..3eecc2b82 100644 --- a/vendor/github.com/containers/storage/types/options_darwin.go +++ b/vendor/github.com/containers/storage/types/options_darwin.go @@ -8,6 +8,9 @@ const ( SystemConfigFile = "/usr/share/containers/storage.conf" ) -var ( - defaultOverrideConfigFile = "/etc/containers/storage.conf" -) +var defaultOverrideConfigFile = "/etc/containers/storage.conf" + +// canUseRootlessOverlay returns true if the overlay driver can be used for rootless containers +func canUseRootlessOverlay(home, 
runhome string) bool {
+	return false
+}
diff --git a/vendor/github.com/containers/storage/types/options_freebsd.go b/vendor/github.com/containers/storage/types/options_freebsd.go
index afb7ec6b4..be2bc2f27 100644
--- a/vendor/github.com/containers/storage/types/options_freebsd.go
+++ b/vendor/github.com/containers/storage/types/options_freebsd.go
@@ -12,3 +12,8 @@ const (
 var (
 	defaultOverrideConfigFile = "/usr/local/etc/containers/storage.conf"
 )
+
+// canUseRootlessOverlay returns true if the overlay driver can be used for rootless containers
+func canUseRootlessOverlay(home, runhome string) bool {
+	return false
+}
diff --git a/vendor/github.com/containers/storage/types/options_linux.go b/vendor/github.com/containers/storage/types/options_linux.go
index d44aaf76a..a28e82883 100644
--- a/vendor/github.com/containers/storage/types/options_linux.go
+++ b/vendor/github.com/containers/storage/types/options_linux.go
@@ -1,5 +1,13 @@
 package types

+import (
+	"os/exec"
+	"strconv"
+	"strings"
+
+	"golang.org/x/sys/unix"
+)
+
 const (
 	// these are the default paths for the run root and graph root for rootful users;
 	// for rootless, the path is constructed via getRootlessStorageOpts
@@ -12,3 +20,33 @@ const (
 var (
 	defaultOverrideConfigFile = "/etc/containers/storage.conf"
 )
+
+// canUseRootlessOverlay returns true if the overlay driver can be used for rootless containers
+func canUseRootlessOverlay(home, runhome string) bool {
+	// we check first for fuse-overlayfs, since that check is cheaper.
+	if path, _ := exec.LookPath("fuse-overlayfs"); path != "" {
+		return true
+	}
+
+	// We cannot use overlay.SupportsNativeOverlay, since canUseRootlessOverlay is called by Podman
+	// before we enter the user namespace, and the driver we pick here is written to the Podman database.
+	// Checking the kernel version is usually not a good idea, since the feature could be back-ported (e.g. on RHEL),
+	// but this is just a heuristic, and on RHEL we always install the storage.conf file.
+	// Native overlay for rootless was added upstream in 5.13 (at least, that is the first version we support), so check
+	// that the kernel is >= 5.13.
+ var uts unix.Utsname + if err := unix.Uname(&uts); err == nil { + parts := strings.Split(string(uts.Release[:]), ".") + major, _ := strconv.Atoi(parts[0]) + if major >= 6 { + return true + } + if major == 5 && len(parts) > 1 { + minor, _ := strconv.Atoi(parts[1]) + if minor >= 13 { + return true + } + } + } + return false +} diff --git a/vendor/github.com/containers/storage/types/options_windows.go b/vendor/github.com/containers/storage/types/options_windows.go index d44aaf76a..c1bea9fac 100644 --- a/vendor/github.com/containers/storage/types/options_windows.go +++ b/vendor/github.com/containers/storage/types/options_windows.go @@ -12,3 +12,8 @@ const ( var ( defaultOverrideConfigFile = "/etc/containers/storage.conf" ) + +// canUseRootlessOverlay returns true if the overlay driver can be used for rootless containers +func canUseRootlessOverlay(home, runhome string) bool { + return false +} diff --git a/vendor/github.com/containers/storage/types/storage_test.conf b/vendor/github.com/containers/storage/types/storage_test.conf index 9b682fe15..87b0c9bb1 100644 --- a/vendor/github.com/containers/storage/types/storage_test.conf +++ b/vendor/github.com/containers/storage/types/storage_test.conf @@ -25,6 +25,16 @@ rootless_storage_path = "$HOME/$UID/containers/storage" additionalimagestores = [ ] +# Remap-UIDs/GIDs is the mapping from UIDs/GIDs as they should appear inside of +# a container, to the UIDs/GIDs as they should appear outside of the container, +# and the length of the range of UIDs/GIDs. Additional mapped sets can be +# listed and will be heeded by libraries, but there are limits to the number of +# mappings which the kernel will allow when you later attempt to run a +# container. +# +remap-uids = "0:1000000000:30000" +remap-gids = "0:1500000000:60000" + [storage.options.overlay] # mountopt specifies comma separated list of extra mount options diff --git a/vendor/github.com/containers/storage/types/utils.go b/vendor/github.com/containers/storage/types/utils.go index 72c38f861..73134f82d 100644 --- a/vendor/github.com/containers/storage/types/utils.go +++ b/vendor/github.com/containers/storage/types/utils.go @@ -20,7 +20,7 @@ func GetRootlessRuntimeDir(rootlessUID int) (string, error) { return "", err } path = filepath.Join(path, "containers") - if err := os.MkdirAll(path, 0700); err != nil { + if err := os.MkdirAll(path, 0o700); err != nil { return "", fmt.Errorf("unable to make rootless runtime: %w", err) } return path, nil @@ -45,25 +45,30 @@ type rootlessRuntimeDirEnvironmentImplementation struct { func (env rootlessRuntimeDirEnvironmentImplementation) getProcCommandFile() string { return env.procCommandFile } + func (env rootlessRuntimeDirEnvironmentImplementation) getRunUserDir() string { return env.runUserDir } + func (env rootlessRuntimeDirEnvironmentImplementation) getTmpPerUserDir() string { return env.tmpPerUserDir } + func (rootlessRuntimeDirEnvironmentImplementation) homeDirGetRuntimeDir() (string, error) { return homedir.GetRuntimeDir() } + func (rootlessRuntimeDirEnvironmentImplementation) systemLstat(path string) (*system.StatT, error) { return system.Lstat(path) } + func (rootlessRuntimeDirEnvironmentImplementation) homedirGet() string { return homedir.Get() } func isRootlessRuntimeDirOwner(dir string, env rootlessRuntimeDirEnvironment) bool { st, err := env.systemLstat(dir) - return err == nil && int(st.UID()) == os.Getuid() && st.Mode()&0700 == 0700 && st.Mode()&0066 == 0000 + return err == nil && int(st.UID()) == os.Getuid() && st.Mode()&0o700 == 0o700 && 
st.Mode()&0o066 == 0o000 } // getRootlessRuntimeDirIsolated is an internal implementation detail of getRootlessRuntimeDir to allow testing. @@ -85,7 +90,7 @@ func getRootlessRuntimeDirIsolated(env rootlessRuntimeDirEnvironment) (string, e tmpPerUserDir := env.getTmpPerUserDir() if tmpPerUserDir != "" { if _, err := env.systemLstat(tmpPerUserDir); os.IsNotExist(err) { - if err := os.Mkdir(tmpPerUserDir, 0700); err != nil { + if err := os.Mkdir(tmpPerUserDir, 0o700); err != nil { logrus.Errorf("Failed to create temp directory for user: %v", err) } else { return tmpPerUserDir, nil diff --git a/vendor/github.com/klauspost/compress/README.md b/vendor/github.com/klauspost/compress/README.md index 55c8ca447..f710a34ec 100644 --- a/vendor/github.com/klauspost/compress/README.md +++ b/vendor/github.com/klauspost/compress/README.md @@ -16,6 +16,19 @@ This package provides various compression algorithms. # changelog +* Apr 16, 2023 - [v1.16.5](https://github.com/klauspost/compress/releases/tag/v1.16.5) + * zstd: readByte needs to use io.ReadFull by @jnoxon in https://github.com/klauspost/compress/pull/802 + * gzip: Fix WriterTo after initial read https://github.com/klauspost/compress/pull/804 + +* Apr 5, 2023 - [v1.16.4](https://github.com/klauspost/compress/releases/tag/v1.16.4) + * zstd: Improve zstd best efficiency by @greatroar and @klauspost in https://github.com/klauspost/compress/pull/784 + * zstd: Respect WithAllLitEntropyCompression https://github.com/klauspost/compress/pull/792 + * zstd: Fix amd64 not always detecting corrupt data https://github.com/klauspost/compress/pull/785 + * zstd: Various minor improvements by @greatroar in https://github.com/klauspost/compress/pull/788 https://github.com/klauspost/compress/pull/794 https://github.com/klauspost/compress/pull/795 + * s2: Fix huge block overflow https://github.com/klauspost/compress/pull/779 + * s2: Allow CustomEncoder fallback https://github.com/klauspost/compress/pull/780 + * gzhttp: Suppport ResponseWriter Unwrap() in gzhttp handler by @jgimenez in https://github.com/klauspost/compress/pull/799 + * Mar 13, 2023 - [v1.16.1](https://github.com/klauspost/compress/releases/tag/v1.16.1) * zstd: Speed up + improve best encoder by @greatroar in https://github.com/klauspost/compress/pull/776 * gzhttp: Add optional [BREACH mitigation](https://github.com/klauspost/compress/tree/master/gzhttp#breach-mitigation). https://github.com/klauspost/compress/pull/762 https://github.com/klauspost/compress/pull/768 https://github.com/klauspost/compress/pull/769 https://github.com/klauspost/compress/pull/770 https://github.com/klauspost/compress/pull/767 diff --git a/vendor/github.com/klauspost/compress/SECURITY.md b/vendor/github.com/klauspost/compress/SECURITY.md new file mode 100644 index 000000000..23a43387b --- /dev/null +++ b/vendor/github.com/klauspost/compress/SECURITY.md @@ -0,0 +1,25 @@ +# Security Policy + +## Supported Versions + +Security updates are applied only to the latest release. + +## Vulnerability Definition + +A security vulnerability is a bug that with certain input triggers a crash or an infinite loop. Most calls will have varying execution time and only in rare cases will slow operation be considered a security vulnerability. + +Corrupted output generally is not considered a security vulnerability, unless independent operations are able to affect each other. Note that not all functionality is re-entrant and safe to use concurrently. 
+
+Out-of-memory crashes only apply if the en/decoder uses an abnormal amount of memory with appropriate options applied to limit maximum window size, concurrency, etc. However, if you are in doubt, you are welcome to file a security issue.
+
+It is assumed that all callers are trusted, meaning internal data exposed through reflection or inspection of returned data structures is not considered a vulnerability.
+
+Vulnerabilities resulting from compiler/assembler errors should be reported upstream. Depending on the severity, this package may or may not implement a workaround.
+
+## Reporting a Vulnerability
+
+If you have discovered a security vulnerability in this project, please report it privately. **Do not disclose it as a public issue.** This gives us time to work with you to fix the issue before public exposure, reducing the chance that the exploit will be used before a patch is released.
+
+Please disclose it at [security advisory](https://github.com/klaupost/compress/security/advisories/new). If possible, please provide a minimal reproducer. If the issue only applies to a single platform, it would be helpful to provide access to that.
+
+This project is maintained by a team of volunteers on a reasonable-effort basis. As such, vulnerabilities will be disclosed on a best-effort basis.
diff --git a/vendor/github.com/klauspost/compress/flate/deflate.go b/vendor/github.com/klauspost/compress/flate/deflate.go
index 82882961a..5faea0b2b 100644
--- a/vendor/github.com/klauspost/compress/flate/deflate.go
+++ b/vendor/github.com/klauspost/compress/flate/deflate.go
@@ -90,9 +90,8 @@ type advancedState struct {
 	ii uint16 // position of last match, intended to overflow to reset.

 	// input window: unprocessed data is window[index:windowEnd]
-	index          int
-	estBitsPerByte int
-	hashMatch      [maxMatchLength + minMatchLength]uint32
+	index     int
+	hashMatch [maxMatchLength + minMatchLength]uint32

 	// Input hash chains
 	// hashHead[hashValue] contains the largest inputIndex with the specified hash value
diff --git a/vendor/github.com/klauspost/compress/flate/huffman_bit_writer.go b/vendor/github.com/klauspost/compress/flate/huffman_bit_writer.go
index 89a5dd89f..f70594c34 100644
--- a/vendor/github.com/klauspost/compress/flate/huffman_bit_writer.go
+++ b/vendor/github.com/klauspost/compress/flate/huffman_bit_writer.go
@@ -34,11 +34,6 @@ const (
 	// Should preferably be a multiple of 6, since
 	// we accumulate 6 bytes between writes to the buffer.
 	bufferFlushSize = 246
-
-	// bufferSize is the actual output byte buffer size.
-	// It must have additional headroom for a flush
-	// which can contain up to 8 bytes.
-	bufferSize = bufferFlushSize + 8
 )

 // Minimum length code that emits bits.
diff --git a/vendor/github.com/klauspost/compress/flate/huffman_sortByFreq.go b/vendor/github.com/klauspost/compress/flate/huffman_sortByFreq.go
index 207780299..6c05ba8c1 100644
--- a/vendor/github.com/klauspost/compress/flate/huffman_sortByFreq.go
+++ b/vendor/github.com/klauspost/compress/flate/huffman_sortByFreq.go
@@ -42,25 +42,6 @@ func quickSortByFreq(data []literalNode, a, b, maxDepth int) {
 	}
 }

-// siftDownByFreq implements the heap property on data[lo, hi).
-// first is an offset into the array where the root of the heap lies.
-func siftDownByFreq(data []literalNode, lo, hi, first int) { - root := lo - for { - child := 2*root + 1 - if child >= hi { - break - } - if child+1 < hi && (data[first+child].freq == data[first+child+1].freq && data[first+child].literal < data[first+child+1].literal || data[first+child].freq < data[first+child+1].freq) { - child++ - } - if data[first+root].freq == data[first+child].freq && data[first+root].literal > data[first+child].literal || data[first+root].freq > data[first+child].freq { - return - } - data[first+root], data[first+child] = data[first+child], data[first+root] - root = child - } -} func doPivotByFreq(data []literalNode, lo, hi int) (midlo, midhi int) { m := int(uint(lo+hi) >> 1) // Written like this to avoid integer overflow. if hi-lo > 40 { diff --git a/vendor/github.com/klauspost/compress/huff0/bitwriter.go b/vendor/github.com/klauspost/compress/huff0/bitwriter.go index aed2347ce..b4d7164e3 100644 --- a/vendor/github.com/klauspost/compress/huff0/bitwriter.go +++ b/vendor/github.com/klauspost/compress/huff0/bitwriter.go @@ -13,14 +13,6 @@ type bitWriter struct { out []byte } -// bitMask16 is bitmasks. Has extra to avoid bounds check. -var bitMask16 = [32]uint16{ - 0, 1, 3, 7, 0xF, 0x1F, - 0x3F, 0x7F, 0xFF, 0x1FF, 0x3FF, 0x7FF, - 0xFFF, 0x1FFF, 0x3FFF, 0x7FFF, 0xFFFF, 0xFFFF, - 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, - 0xFFFF, 0xFFFF} /* up to 16 bits */ - // addBits16Clean will add up to 16 bits. value may not contain more set bits than indicated. // It will not check if there is space for them, so the caller must ensure that it has flushed recently. func (b *bitWriter) addBits16Clean(value uint16, bits uint8) { diff --git a/vendor/github.com/klauspost/compress/huff0/decompress.go b/vendor/github.com/klauspost/compress/huff0/decompress.go index 3c0b398c7..54bd08b25 100644 --- a/vendor/github.com/klauspost/compress/huff0/decompress.go +++ b/vendor/github.com/klauspost/compress/huff0/decompress.go @@ -253,7 +253,7 @@ func (d *Decoder) decompress1X8Bit(dst, src []byte) ([]byte, error) { switch d.actualTableLog { case 8: - const shift = 8 - 8 + const shift = 0 for br.off >= 4 { br.fillFast() v := dt[uint8(br.value>>(56+shift))] diff --git a/vendor/github.com/klauspost/compress/internal/snapref/encode_other.go b/vendor/github.com/klauspost/compress/internal/snapref/encode_other.go index 05db94d39..2aa6a95a0 100644 --- a/vendor/github.com/klauspost/compress/internal/snapref/encode_other.go +++ b/vendor/github.com/klauspost/compress/internal/snapref/encode_other.go @@ -87,18 +87,6 @@ func emitCopy(dst []byte, offset, length int) int { return i + 2 } -// extendMatch returns the largest k such that k <= len(src) and that -// src[i:i+k-j] and src[j:k] have the same contents. -// -// It assumes that: -// -// 0 <= i && i < j && j <= len(src) -func extendMatch(src []byte, i, j int) int { - for ; j < len(src) && src[i] == src[j]; i, j = i+1, j+1 { - } - return j -} - func hash(u, shift uint32) uint32 { return (u * 0x1e35a7bd) >> shift } diff --git a/vendor/github.com/klauspost/compress/zstd/README.md b/vendor/github.com/klauspost/compress/zstd/README.md index 65b38abed..bdd49c8b2 100644 --- a/vendor/github.com/klauspost/compress/zstd/README.md +++ b/vendor/github.com/klauspost/compress/zstd/README.md @@ -304,7 +304,7 @@ import "github.com/klauspost/compress/zstd" // Create a reader that caches decompressors. // For this operation type we supply a nil Reader. 
-var decoder, _ = zstd.NewReader(nil, WithDecoderConcurrency(0)) +var decoder, _ = zstd.NewReader(nil, zstd.WithDecoderConcurrency(0)) // Decompress a buffer. We don't supply a destination buffer, // so it will be allocated by the decoder. diff --git a/vendor/github.com/klauspost/compress/zstd/blockdec.go b/vendor/github.com/klauspost/compress/zstd/blockdec.go index 5f272d87f..9f17ce601 100644 --- a/vendor/github.com/klauspost/compress/zstd/blockdec.go +++ b/vendor/github.com/klauspost/compress/zstd/blockdec.go @@ -592,7 +592,7 @@ func (b *blockDec) prepareSequences(in []byte, hist *history) (err error) { } seq.fse.setRLE(symb) if debugDecoder { - printf("RLE set to %+v, code: %v", symb, v) + printf("RLE set to 0x%x, code: %v", symb, v) } case compModeFSE: println("Reading table for", tableIndex(i)) diff --git a/vendor/github.com/klauspost/compress/zstd/bytebuf.go b/vendor/github.com/klauspost/compress/zstd/bytebuf.go index 512ffe5b9..55a388553 100644 --- a/vendor/github.com/klauspost/compress/zstd/bytebuf.go +++ b/vendor/github.com/klauspost/compress/zstd/bytebuf.go @@ -109,7 +109,7 @@ func (r *readerWrapper) readBig(n int, dst []byte) ([]byte, error) { } func (r *readerWrapper) readByte() (byte, error) { - n2, err := r.r.Read(r.tmp[:1]) + n2, err := io.ReadFull(r.r, r.tmp[:1]) if err != nil { if err == io.EOF { err = io.ErrUnexpectedEOF diff --git a/vendor/github.com/klauspost/compress/zstd/decoder_options.go b/vendor/github.com/klauspost/compress/zstd/decoder_options.go index 07a90dd7a..774c5f00f 100644 --- a/vendor/github.com/klauspost/compress/zstd/decoder_options.go +++ b/vendor/github.com/klauspost/compress/zstd/decoder_options.go @@ -107,7 +107,7 @@ func WithDecoderDicts(dicts ...[]byte) DOption { } } -// WithEncoderDictRaw registers a dictionary that may be used by the decoder. +// WithDecoderDictRaw registers a dictionary that may be used by the decoder. // The slice content can be arbitrary data. func WithDecoderDictRaw(id uint32, content []byte) DOption { return func(o *decoderOptions) error { diff --git a/vendor/github.com/klauspost/compress/zstd/enc_fast.go b/vendor/github.com/klauspost/compress/zstd/enc_fast.go index 315b1a8f2..cbc626eec 100644 --- a/vendor/github.com/klauspost/compress/zstd/enc_fast.go +++ b/vendor/github.com/klauspost/compress/zstd/enc_fast.go @@ -133,8 +133,7 @@ encodeLoop: if canRepeat && repIndex >= 0 && load3232(src, repIndex) == uint32(cv>>16) { // Consider history as well. var seq seq - var length int32 - length = 4 + e.matchlen(s+6, repIndex+4, src) + length := 4 + e.matchlen(s+6, repIndex+4, src) seq.matchLen = uint32(length - zstdMinMatch) // We might be able to match backwards. @@ -645,8 +644,7 @@ encodeLoop: if canRepeat && repIndex >= 0 && load3232(src, repIndex) == uint32(cv>>16) { // Consider history as well. var seq seq - var length int32 - length = 4 + e.matchlen(s+6, repIndex+4, src) + length := 4 + e.matchlen(s+6, repIndex+4, src) seq.matchLen = uint32(length - zstdMinMatch) diff --git a/vendor/github.com/klauspost/compress/zstd/encoder_options.go b/vendor/github.com/klauspost/compress/zstd/encoder_options.go index 50f70533b..faaf81921 100644 --- a/vendor/github.com/klauspost/compress/zstd/encoder_options.go +++ b/vendor/github.com/klauspost/compress/zstd/encoder_options.go @@ -129,7 +129,7 @@ func WithEncoderPadding(n int) EOption { } // No need to waste our time. 
if n == 1 { - o.pad = 0 + n = 0 } if n > 1<<30 { return fmt.Errorf("padding must less than 1GB (1<<30 bytes) ") diff --git a/vendor/github.com/klauspost/compress/zstd/framedec.go b/vendor/github.com/klauspost/compress/zstd/framedec.go index cc0aa2274..53e160f7e 100644 --- a/vendor/github.com/klauspost/compress/zstd/framedec.go +++ b/vendor/github.com/klauspost/compress/zstd/framedec.go @@ -73,20 +73,20 @@ func (d *frameDec) reset(br byteBuffer) error { switch err { case io.EOF, io.ErrUnexpectedEOF: return io.EOF - default: - return err case nil: signature[0] = b[0] + default: + return err } // Read the rest, don't allow io.ErrUnexpectedEOF b, err = br.readSmall(3) switch err { case io.EOF: return io.EOF - default: - return err case nil: copy(signature[1:], b) + default: + return err } if string(signature[1:4]) != skippableFrameMagic || signature[0]&0xf0 != 0x50 { diff --git a/vendor/github.com/klauspost/compress/zstd/matchlen_amd64.go b/vendor/github.com/klauspost/compress/zstd/matchlen_amd64.go new file mode 100644 index 000000000..f41932b7a --- /dev/null +++ b/vendor/github.com/klauspost/compress/zstd/matchlen_amd64.go @@ -0,0 +1,16 @@ +//go:build amd64 && !appengine && !noasm && gc +// +build amd64,!appengine,!noasm,gc + +// Copyright 2019+ Klaus Post. All rights reserved. +// License information can be found in the LICENSE file. + +package zstd + +// matchLen returns how many bytes match in a and b +// +// It assumes that: +// +// len(a) <= len(b) and len(a) > 0 +// +//go:noescape +func matchLen(a []byte, b []byte) int diff --git a/vendor/github.com/klauspost/compress/zstd/matchlen_amd64.s b/vendor/github.com/klauspost/compress/zstd/matchlen_amd64.s new file mode 100644 index 000000000..9a7655c0f --- /dev/null +++ b/vendor/github.com/klauspost/compress/zstd/matchlen_amd64.s @@ -0,0 +1,68 @@ +// Copied from S2 implementation. + +//go:build !appengine && !noasm && gc && !noasm + +#include "textflag.h" + +// func matchLen(a []byte, b []byte) int +// Requires: BMI +TEXT ·matchLen(SB), NOSPLIT, $0-56 + MOVQ a_base+0(FP), AX + MOVQ b_base+24(FP), CX + MOVQ a_len+8(FP), DX + + // matchLen + XORL SI, SI + CMPL DX, $0x08 + JB matchlen_match4_standalone + +matchlen_loopback_standalone: + MOVQ (AX)(SI*1), BX + XORQ (CX)(SI*1), BX + TESTQ BX, BX + JZ matchlen_loop_standalone + +#ifdef GOAMD64_v3 + TZCNTQ BX, BX +#else + BSFQ BX, BX +#endif + SARQ $0x03, BX + LEAL (SI)(BX*1), SI + JMP gen_match_len_end + +matchlen_loop_standalone: + LEAL -8(DX), DX + LEAL 8(SI), SI + CMPL DX, $0x08 + JAE matchlen_loopback_standalone + +matchlen_match4_standalone: + CMPL DX, $0x04 + JB matchlen_match2_standalone + MOVL (AX)(SI*1), BX + CMPL (CX)(SI*1), BX + JNE matchlen_match2_standalone + LEAL -4(DX), DX + LEAL 4(SI), SI + +matchlen_match2_standalone: + CMPL DX, $0x02 + JB matchlen_match1_standalone + MOVW (AX)(SI*1), BX + CMPW (CX)(SI*1), BX + JNE matchlen_match1_standalone + LEAL -2(DX), DX + LEAL 2(SI), SI + +matchlen_match1_standalone: + CMPL DX, $0x01 + JB gen_match_len_end + MOVB (AX)(SI*1), BL + CMPB (CX)(SI*1), BL + JNE gen_match_len_end + INCL SI + +gen_match_len_end: + MOVQ SI, ret+48(FP) + RET diff --git a/vendor/github.com/klauspost/compress/zstd/matchlen_generic.go b/vendor/github.com/klauspost/compress/zstd/matchlen_generic.go new file mode 100644 index 000000000..57b9c31c0 --- /dev/null +++ b/vendor/github.com/klauspost/compress/zstd/matchlen_generic.go @@ -0,0 +1,33 @@ +//go:build !amd64 || appengine || !gc || noasm +// +build !amd64 appengine !gc noasm + +// Copyright 2019+ Klaus Post. 
All rights reserved. +// License information can be found in the LICENSE file. + +package zstd + +import ( + "encoding/binary" + "math/bits" +) + +// matchLen returns the maximum common prefix length of a and b. +// a must be the shortest of the two. +func matchLen(a, b []byte) (n int) { + for ; len(a) >= 8 && len(b) >= 8; a, b = a[8:], b[8:] { + diff := binary.LittleEndian.Uint64(a) ^ binary.LittleEndian.Uint64(b) + if diff != 0 { + return n + bits.TrailingZeros64(diff)>>3 + } + n += 8 + } + + for i := range a { + if a[i] != b[i] { + break + } + n++ + } + return n + +} diff --git a/vendor/github.com/klauspost/compress/zstd/zstd.go b/vendor/github.com/klauspost/compress/zstd/zstd.go index 89396673d..4be7cc736 100644 --- a/vendor/github.com/klauspost/compress/zstd/zstd.go +++ b/vendor/github.com/klauspost/compress/zstd/zstd.go @@ -9,7 +9,6 @@ import ( "errors" "log" "math" - "math/bits" ) // enable debug printing @@ -106,27 +105,6 @@ func printf(format string, a ...interface{}) { } } -// matchLen returns the maximum common prefix length of a and b. -// a must be the shortest of the two. -func matchLen(a, b []byte) (n int) { - for ; len(a) >= 8 && len(b) >= 8; a, b = a[8:], b[8:] { - diff := binary.LittleEndian.Uint64(a) ^ binary.LittleEndian.Uint64(b) - if diff != 0 { - return n + bits.TrailingZeros64(diff)>>3 - } - n += 8 - } - - for i := range a { - if a[i] != b[i] { - break - } - n++ - } - return n - -} - func load3232(b []byte, i int32) uint32 { return binary.LittleEndian.Uint32(b[:len(b):len(b)][i:]) } diff --git a/vendor/github.com/mistifyio/go-zfs/v3/utils.go b/vendor/github.com/mistifyio/go-zfs/v3/utils.go index 0c2cce7d9..b69942b53 100644 --- a/vendor/github.com/mistifyio/go-zfs/v3/utils.go +++ b/vendor/github.com/mistifyio/go-zfs/v3/utils.go @@ -37,13 +37,16 @@ func (c *command) Run(arg ...string) ([][]string, error) { cmd.Stderr = &stderr id := uuid.New().String() - joinedArgs := strings.Join(cmd.Args, " ") + joinedArgs := cmd.Path + if len(cmd.Args) > 1 { + joinedArgs = strings.Join(append([]string{cmd.Path}, cmd.Args[1:]...), " ") + } logger.Log([]string{"ID:" + id, "START", joinedArgs}) if err := cmd.Run(); err != nil { return nil, &Error{ Err: err, - Debug: strings.Join([]string{cmd.Path, joinedArgs[1:]}, " "), + Debug: joinedArgs, Stderr: stderr.String(), } } @@ -61,7 +64,7 @@ func (c *command) Run(arg ...string) ([][]string, error) { output := make([][]string, len(lines)) for i, l := range lines { - output[i] = strings.Fields(l) + output[i] = strings.Split(l, "\t") } return output, nil diff --git a/vendor/github.com/mistifyio/go-zfs/v3/utils_notsolaris.go b/vendor/github.com/mistifyio/go-zfs/v3/utils_notsolaris.go index ef1beac90..b1ce59656 100644 --- a/vendor/github.com/mistifyio/go-zfs/v3/utils_notsolaris.go +++ b/vendor/github.com/mistifyio/go-zfs/v3/utils_notsolaris.go @@ -15,5 +15,5 @@ var ( zpoolPropList = []string{"name", "health", "allocated", "size", "free", "readonly", "dedupratio", "fragmentation", "freeing", "leaked"} zpoolPropListOptions = strings.Join(zpoolPropList, ",") - zpoolArgs = []string{"get", "-p", zpoolPropListOptions} + zpoolArgs = []string{"get", "-Hp", zpoolPropListOptions} ) diff --git a/vendor/github.com/mistifyio/go-zfs/v3/utils_solaris.go b/vendor/github.com/mistifyio/go-zfs/v3/utils_solaris.go index c6bf6d87a..f19aebabb 100644 --- a/vendor/github.com/mistifyio/go-zfs/v3/utils_solaris.go +++ b/vendor/github.com/mistifyio/go-zfs/v3/utils_solaris.go @@ -15,5 +15,5 @@ var ( zpoolPropList = []string{"name", "health", "allocated", "size", "free", 
"readonly", "dedupratio"} zpoolPropListOptions = strings.Join(zpoolPropList, ",") - zpoolArgs = []string{"get", "-p", zpoolPropListOptions} + zpoolArgs = []string{"get", "-Hp", zpoolPropListOptions} ) diff --git a/vendor/github.com/mistifyio/go-zfs/v3/zpool.go b/vendor/github.com/mistifyio/go-zfs/v3/zpool.go index 2f7071305..a0bd6471a 100644 --- a/vendor/github.com/mistifyio/go-zfs/v3/zpool.go +++ b/vendor/github.com/mistifyio/go-zfs/v3/zpool.go @@ -49,9 +49,6 @@ func GetZpool(name string) (*Zpool, error) { return nil, err } - // there is no -H - out = out[1:] - z := &Zpool{Name: name} for _, line := range out { if err := z.parseLine(line); err != nil { diff --git a/vendor/github.com/opencontainers/runtime-spec/specs-go/config.go b/vendor/github.com/opencontainers/runtime-spec/specs-go/config.go index 25f4e6e82..4e7717d53 100644 --- a/vendor/github.com/opencontainers/runtime-spec/specs-go/config.go +++ b/vendor/github.com/opencontainers/runtime-spec/specs-go/config.go @@ -33,6 +33,34 @@ type Spec struct { ZOS *ZOS `json:"zos,omitempty" platform:"zos"` } +// Scheduler represents the scheduling attributes for a process. It is based on +// the Linux sched_setattr(2) syscall. +type Scheduler struct { + // Policy represents the scheduling policy (e.g., SCHED_FIFO, SCHED_RR, SCHED_OTHER). + Policy LinuxSchedulerPolicy `json:"policy"` + + // Nice is the nice value for the process, which affects its priority. + Nice int32 `json:"nice,omitempty"` + + // Priority represents the static priority of the process. + Priority int32 `json:"priority,omitempty"` + + // Flags is an array of scheduling flags. + Flags []LinuxSchedulerFlag `json:"flags,omitempty"` + + // The following ones are used by the DEADLINE scheduler. + + // Runtime is the amount of time in nanoseconds during which the process + // is allowed to run in a given period. + Runtime uint64 `json:"runtime,omitempty"` + + // Deadline is the absolute deadline for the process to complete its execution. + Deadline uint64 `json:"deadline,omitempty"` + + // Period is the length of the period in nanoseconds used for determining the process runtime. + Period uint64 `json:"period,omitempty"` +} + // Process contains information to start a specific application inside the container. type Process struct { // Terminal creates an interactive terminal for the container. @@ -60,8 +88,12 @@ type Process struct { ApparmorProfile string `json:"apparmorProfile,omitempty" platform:"linux"` // Specify an oom_score_adj for the container. OOMScoreAdj *int `json:"oomScoreAdj,omitempty" platform:"linux"` + // Scheduler specifies the scheduling attributes for a process + Scheduler *Scheduler `json:"scheduler,omitempty" platform:"linux"` // SelinuxLabel specifies the selinux context that the container process is run as. SelinuxLabel string `json:"selinuxLabel,omitempty" platform:"linux"` + // IOPriority contains the I/O priority settings for the cgroup. + IOPriority *LinuxIOPriority `json:"ioPriority,omitempty" platform:"linux"` } // LinuxCapabilities specifies the list of allowed capabilities that are kept for a process. @@ -79,6 +111,22 @@ type LinuxCapabilities struct { Ambient []string `json:"ambient,omitempty" platform:"linux"` } +// IOPriority represents I/O priority settings for the container's processes within the process group. +type LinuxIOPriority struct { + Class IOPriorityClass `json:"class"` + Priority int `json:"priority"` +} + +// IOPriorityClass represents an I/O scheduling class. +type IOPriorityClass string + +// Possible values for IOPriorityClass. 
+const (
+	IOPRIO_CLASS_RT   IOPriorityClass = "IOPRIO_CLASS_RT"
+	IOPRIO_CLASS_BE   IOPriorityClass = "IOPRIO_CLASS_BE"
+	IOPRIO_CLASS_IDLE IOPriorityClass = "IOPRIO_CLASS_IDLE"
+)
+
 // Box specifies dimensions of a rectangle. Used for specifying the size of a console.
 type Box struct {
 	// Height is the vertical dimension of a box.
@@ -789,3 +837,43 @@ type ZOSDevice struct {
 	// Gid of the device.
 	GID *uint32 `json:"gid,omitempty"`
 }
+
+// LinuxSchedulerPolicy represents different scheduling policies used with the Linux Scheduler
+type LinuxSchedulerPolicy string
+
+const (
+	// SchedOther is the default scheduling policy
+	SchedOther LinuxSchedulerPolicy = "SCHED_OTHER"
+	// SchedFIFO is the First-In-First-Out scheduling policy
+	SchedFIFO LinuxSchedulerPolicy = "SCHED_FIFO"
+	// SchedRR is the Round-Robin scheduling policy
+	SchedRR LinuxSchedulerPolicy = "SCHED_RR"
+	// SchedBatch is the Batch scheduling policy
+	SchedBatch LinuxSchedulerPolicy = "SCHED_BATCH"
+	// SchedISO is the Isolation scheduling policy
+	SchedISO LinuxSchedulerPolicy = "SCHED_ISO"
+	// SchedIdle is the Idle scheduling policy
+	SchedIdle LinuxSchedulerPolicy = "SCHED_IDLE"
+	// SchedDeadline is the Deadline scheduling policy
+	SchedDeadline LinuxSchedulerPolicy = "SCHED_DEADLINE"
+)
+
+// LinuxSchedulerFlag represents the flags used by the Linux Scheduler.
+type LinuxSchedulerFlag string
+
+const (
+	// SchedFlagResetOnFork represents the reset on fork scheduling flag
+	SchedFlagResetOnFork LinuxSchedulerFlag = "SCHED_FLAG_RESET_ON_FORK"
+	// SchedFlagReclaim represents the reclaim scheduling flag
+	SchedFlagReclaim LinuxSchedulerFlag = "SCHED_FLAG_RECLAIM"
+	// SchedFlagDLOverrun represents the deadline overrun scheduling flag
+	SchedFlagDLOverrun LinuxSchedulerFlag = "SCHED_FLAG_DL_OVERRUN"
+	// SchedFlagKeepPolicy represents the keep policy scheduling flag
+	SchedFlagKeepPolicy LinuxSchedulerFlag = "SCHED_FLAG_KEEP_POLICY"
+	// SchedFlagKeepParams represents the keep parameters scheduling flag
+	SchedFlagKeepParams LinuxSchedulerFlag = "SCHED_FLAG_KEEP_PARAMS"
+	// SchedFlagUtilClampMin represents the utilization clamp minimum scheduling flag
+	SchedFlagUtilClampMin LinuxSchedulerFlag = "SCHED_FLAG_UTIL_CLAMP_MIN"
+	// SchedFlagUtilClampMax represents the utilization clamp maximum scheduling flag
+	SchedFlagUtilClampMax LinuxSchedulerFlag = "SCHED_FLAG_UTIL_CLAMP_MAX"
+)
diff --git a/vendor/github.com/opencontainers/runtime-spec/specs-go/version.go b/vendor/github.com/opencontainers/runtime-spec/specs-go/version.go
index 1b81f3c9d..41933fb17 100644
--- a/vendor/github.com/opencontainers/runtime-spec/specs-go/version.go
+++ b/vendor/github.com/opencontainers/runtime-spec/specs-go/version.go
@@ -11,7 +11,7 @@ const (
 	VersionPatch = 0

 	// VersionDev indicates development branch. Releases will be empty string.
-	VersionDev = "-rc.2"
+	VersionDev = "-rc.3"
 )

 // Version is the specification version that the package types support.
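[Editor's note] The runtime-spec bump from v1.1.0-rc.2 to rc.3 is the main API-surface change in this patch that downstream code is likely to consume directly: Process gains Scheduler (sched_setattr(2) attributes) and IOPriority. A short sketch of populating the new fields; the field and constant names come from the hunks above, while the values are purely illustrative:

    package main

    import (
        "encoding/json"
        "fmt"

        specs "github.com/opencontainers/runtime-spec/specs-go"
    )

    func main() {
        p := specs.Process{
            // Scheduler maps onto sched_setattr(2): here, a round-robin
            // policy with a static priority, resetting children to the
            // default policy on fork.
            Scheduler: &specs.Scheduler{
                Policy:   specs.SchedRR,
                Priority: 10,
                Flags:    []specs.LinuxSchedulerFlag{specs.SchedFlagResetOnFork},
            },
            // IOPriority selects an I/O scheduling class and level for the
            // container's processes.
            IOPriority: &specs.LinuxIOPriority{
                Class:    specs.IOPRIO_CLASS_BE,
                Priority: 4,
            },
        }
        out, _ := json.MarshalIndent(p, "", "  ")
        fmt.Println(string(out))
    }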
diff --git a/vendor/modules.txt b/vendor/modules.txt index 36904beca..af4207383 100644 --- a/vendor/modules.txt +++ b/vendor/modules.txt @@ -6,11 +6,13 @@ github.com/Azure/go-ansiterm/winterm ## explicit; go 1.16 github.com/BurntSushi/toml github.com/BurntSushi/toml/internal -# github.com/Microsoft/go-winio v0.6.0 +# github.com/Microsoft/go-winio v0.6.1 ## explicit; go 1.17 github.com/Microsoft/go-winio github.com/Microsoft/go-winio/backuptar +github.com/Microsoft/go-winio/internal/fs github.com/Microsoft/go-winio/internal/socket +github.com/Microsoft/go-winio/internal/stringbuffer github.com/Microsoft/go-winio/pkg/guid github.com/Microsoft/go-winio/tools/mkwinsyscall github.com/Microsoft/go-winio/vhd @@ -234,8 +236,8 @@ github.com/containers/psgo/internal/dev github.com/containers/psgo/internal/host github.com/containers/psgo/internal/proc github.com/containers/psgo/internal/process -# github.com/containers/storage v1.46.1 -## explicit; go 1.18 +# github.com/containers/storage v1.48.0 +## explicit; go 1.19 github.com/containers/storage github.com/containers/storage/drivers github.com/containers/storage/drivers/aufs @@ -496,7 +498,7 @@ github.com/josharian/intern # github.com/json-iterator/go v1.1.12 ## explicit; go 1.12 github.com/json-iterator/go -# github.com/klauspost/compress v1.16.4 +# github.com/klauspost/compress v1.16.6 ## explicit; go 1.18 github.com/klauspost/compress github.com/klauspost/compress/flate @@ -506,7 +508,7 @@ github.com/klauspost/compress/internal/cpuinfo github.com/klauspost/compress/internal/snapref github.com/klauspost/compress/zstd github.com/klauspost/compress/zstd/internal/xxhash -# github.com/klauspost/pgzip v1.2.6-0.20220930104621-17e8dac29df8 +# github.com/klauspost/pgzip v1.2.6 ## explicit github.com/klauspost/pgzip # github.com/kr/fs v0.1.0 @@ -548,7 +550,7 @@ github.com/mattn/go-shellwords # github.com/miekg/pkcs11 v1.1.1 ## explicit; go 1.12 github.com/miekg/pkcs11 -# github.com/mistifyio/go-zfs/v3 v3.0.0 +# github.com/mistifyio/go-zfs/v3 v3.0.1 ## explicit; go 1.14 github.com/mistifyio/go-zfs/v3 # github.com/mitchellh/mapstructure v1.5.0 @@ -621,7 +623,7 @@ github.com/opencontainers/go-digest ## explicit; go 1.17 github.com/opencontainers/image-spec/specs-go github.com/opencontainers/image-spec/specs-go/v1 -# github.com/opencontainers/runc v1.1.5 => github.com/opencontainers/runc v1.1.1-0.20220617142545-8b9452f75cbc +# github.com/opencontainers/runc v1.1.7 => github.com/opencontainers/runc v1.1.1-0.20220617142545-8b9452f75cbc ## explicit; go 1.17 github.com/opencontainers/runc/libcontainer/apparmor github.com/opencontainers/runc/libcontainer/cgroups @@ -633,7 +635,7 @@ github.com/opencontainers/runc/libcontainer/devices github.com/opencontainers/runc/libcontainer/user github.com/opencontainers/runc/libcontainer/userns github.com/opencontainers/runc/libcontainer/utils -# github.com/opencontainers/runtime-spec v1.1.0-rc.2 +# github.com/opencontainers/runtime-spec v1.1.0-rc.3 ## explicit github.com/opencontainers/runtime-spec/specs-go # github.com/opencontainers/runtime-tools v0.9.1-0.20230317050512-e931285f4b69
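[Editor's note] One pattern from earlier in this patch deserves a closing example: the DeleteContainer rewrite in store.go drops a hand-rolled error channel plus sync.WaitGroup in favor of hashicorp/go-multierror's Group, which this module already vendors. A self-contained sketch of that primitive (the failure messages are made up for illustration):

    package main

    import (
        "errors"
        "fmt"

        "github.com/hashicorp/go-multierror"
    )

    func main() {
        // Group mirrors golang.org/x/sync/errgroup, but Wait() collects
        // every failure into a *multierror.Error instead of keeping only
        // the first one.
        var wg multierror.Group
        wg.Go(func() error { return nil }) // e.g. layer already gone
        wg.Go(func() error { return errors.New("remove graph directory: permission denied") })
        wg.Go(func() error { return errors.New("remove run directory: not empty") })

        if merr := wg.Wait(); merr != nil {
            fmt.Println(merr.ErrorOrNil())
        }
    }

Compared with the deleted goroutine/channel version, every cleanup error still surfaces, but the fan-out, the wait, and the aggregation are all handled by one well-tested type.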