From 0fa8a978e99c6eb7bf340ddceec7824d8a8f09e2 Mon Sep 17 00:00:00 2001 From: Marc Odermatt Date: Mon, 12 Feb 2024 11:55:38 +0100 Subject: [PATCH 1/8] Make once, cron and begin/frequency flags mutually exclusive - Make the flags themselves mutually exclusive and remove check in timer.go. This check always failed when using cron, as the begin and frequency options are populated with defaults. - add else if for timer creation, to prevent overwriting cron delay with `begin +0` delay from default --- cmd/dump.go | 6 ++++++ pkg/core/timer.go | 14 +------------- 2 files changed, 7 insertions(+), 13 deletions(-) diff --git a/cmd/dump.go b/cmd/dump.go index b04f7f02..828c18d7 100644 --- a/cmd/dump.go +++ b/cmd/dump.go @@ -227,5 +227,11 @@ S3: If it is a URL of the format s3://bucketname/path then it will connect via S // max-allowed-packet size flags.Int("max-allowed-packet", defaultMaxAllowedPacket, "Maximum size of the buffer for client/server communication, similar to mysqldump's max_allowed_packet. 
0 means to use the default size.") + cmd.MarkFlagsMutuallyExclusive("once", "cron") + cmd.MarkFlagsMutuallyExclusive("once", "begin") + cmd.MarkFlagsMutuallyExclusive("once", "frequency") + cmd.MarkFlagsMutuallyExclusive("cron", "begin") + cmd.MarkFlagsMutuallyExclusive("cron", "frequency") + return cmd, nil } diff --git a/pkg/core/timer.go b/pkg/core/timer.go index 37872b77..3a1d112d 100644 --- a/pkg/core/timer.go +++ b/pkg/core/timer.go @@ -1,7 +1,6 @@ package core import ( - "errors" "fmt" "regexp" "strconv" @@ -40,16 +39,6 @@ func Timer(opts TimerOptions) (<-chan Update, error) { ) now := time.Now() - - // validate we do not have conflicting options - if opts.Once && (opts.Cron != "" || opts.Begin != "" || opts.Frequency != 0) { - return nil, errors.New("option 'Once' is exclusive and must not be used with Begin, Cron or Frequency") - } - - if opts.Cron != "" && (opts.Begin != "" || opts.Frequency != 0) { - return nil, errors.New("option 'Cron' is exclusive and must not be used with Begin, Once or Frequency") - } - // parse the options to determine our delays if opts.Cron != "" { // calculate delay until next cron moment as defined @@ -57,8 +46,7 @@ func Timer(opts TimerOptions) (<-chan Update, error) { if err != nil { return nil, fmt.Errorf("invalid cron format '%s': %v", opts.Cron, err) } - } - if opts.Begin != "" { + } else if opts.Begin != "" { // calculate how long to wait minsRe, err := regexp.Compile(`^\+([0-9]+)$`) if err != nil { From d48fc28918b96e122636bf41cf5ac3e4aef12c6f Mon Sep 17 00:00:00 2001 From: Marc Odermatt Date: Fri, 23 Feb 2024 18:03:37 +0100 Subject: [PATCH 2/8] Unit tests for dump flags - passing tests for once, cron, begin, frequency - error tests for incompatible combinations of flags --- cmd/dump_test.go | 31 ++++++++++++++++++++++++++++++- 1 file changed, 30 insertions(+), 1 deletion(-) diff --git a/cmd/dump_test.go b/cmd/dump_test.go index 2e260561..34070aa4 100644 --- a/cmd/dump_test.go +++ b/cmd/dump_test.go @@ -14,7 +14,7 @@ 
import ( ) func TestDumpCmd(t *testing.T) { - t.Parallel() + //t.Parallel() fileTarget := "file:///foo/bar" fileTargetURL, _ := url.Parse(fileTarget) @@ -34,12 +34,41 @@ func TestDumpCmd(t *testing.T) { Compressor: &compression.GzipCompressor{}, DBConn: database.Connection{Host: "abc"}, }, core.TimerOptions{Frequency: defaultFrequency, Begin: defaultBegin}}, + {"once flag", []string{"--server", "abc", "--target", "file:///foo/bar", "--once"}, "", false, core.DumpOptions{ + Targets: []storage.Storage{file.New(*fileTargetURL)}, + MaxAllowedPacket: defaultMaxAllowedPacket, + Compressor: &compression.GzipCompressor{}, + DBConn: database.Connection{Host: "abc"}, + }, core.TimerOptions{Frequency: defaultFrequency, Begin: defaultBegin, Once: true}}, + {"cron flag", []string{"--server", "abc", "--target", "file:///foo/bar", "--cron", "0 0 * * *"}, "", false, core.DumpOptions{ + Targets: []storage.Storage{file.New(*fileTargetURL)}, + MaxAllowedPacket: defaultMaxAllowedPacket, + Compressor: &compression.GzipCompressor{}, + DBConn: database.Connection{Host: "abc"}, + }, core.TimerOptions{Frequency: defaultFrequency, Begin: defaultBegin, Cron: "0 0 * * *"}}, + {"begin flag", []string{"--server", "abc", "--target", "file:///foo/bar", "--begin", "1234"}, "", false, core.DumpOptions{ + Targets: []storage.Storage{file.New(*fileTargetURL)}, + MaxAllowedPacket: defaultMaxAllowedPacket, + Compressor: &compression.GzipCompressor{}, + DBConn: database.Connection{Host: "abc"}, + }, core.TimerOptions{Frequency: defaultFrequency, Begin: "1234"}}, + {"frequency flag", []string{"--server", "abc", "--target", "file:///foo/bar", "--frequency", "10"}, "", false, core.DumpOptions{ + Targets: []storage.Storage{file.New(*fileTargetURL)}, + MaxAllowedPacket: defaultMaxAllowedPacket, + Compressor: &compression.GzipCompressor{}, + DBConn: database.Connection{Host: "abc"}, + }, core.TimerOptions{Frequency: 10, Begin: defaultBegin}}, {"config file", []string{"--config-file", "testdata/config.yml"}, 
"", false, core.DumpOptions{ Targets: []storage.Storage{file.New(*fileTargetURL)}, MaxAllowedPacket: defaultMaxAllowedPacket, Compressor: &compression.GzipCompressor{}, DBConn: database.Connection{Host: "abc", Port: 3306, User: "user", Pass: "xxxx"}, }, core.TimerOptions{Frequency: defaultFrequency, Begin: defaultBegin}}, + {"incompatible flags: once/cron", []string{"--server", "abc", "--target", "file:///foo/bar", "--once", "--cron", "0 0 * * *"}, "", true, core.DumpOptions{}, core.TimerOptions{}}, + {"incompatible flags: once/begin", []string{"--server", "abc", "--target", "file:///foo/bar", "--once", "--begin", "1234"}, "", true, core.DumpOptions{}, core.TimerOptions{}}, + {"incompatible flags: once/frequency", []string{"--server", "abc", "--target", "file:///foo/bar", "--once", "--frequency", "10"}, "", true, core.DumpOptions{}, core.TimerOptions{}}, + {"incompatible flags: cron/begin", []string{"--server", "abc", "--target", "file:///foo/bar", "--cron", "0 0 * * *", "--begin", "1234"}, "", true, core.DumpOptions{}, core.TimerOptions{}}, + {"incompatible flags: cron/frequency", []string{"--server", "abc", "--target", "file:///foo/bar", "--cron", "0 0 * * *", "--frequency", "10"}, "", true, core.DumpOptions{}, core.TimerOptions{}}, } for _, tt := range tests { From dc221e154ab92bffe2afbac5c12f684bbf6722ad Mon Sep 17 00:00:00 2001 From: Marc Odermatt Date: Fri, 23 Feb 2024 18:10:11 +0100 Subject: [PATCH 3/8] Unit tests for dump flags revert commented out t.Parallel() --- cmd/dump_test.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cmd/dump_test.go b/cmd/dump_test.go index 34070aa4..66d96fcc 100644 --- a/cmd/dump_test.go +++ b/cmd/dump_test.go @@ -14,7 +14,7 @@ import ( ) func TestDumpCmd(t *testing.T) { - //t.Parallel() + t.Parallel() fileTarget := "file:///foo/bar" fileTargetURL, _ := url.Parse(fileTarget) From 3c8ad9de9b3b9928209fd11231e454e9244cf672 Mon Sep 17 00:00:00 2001 From: Marc Odermatt Date: Wed, 13 Mar 2024 18:14:49 +0100 
Subject: [PATCH 4/8] fix: Mutually exclusive flags, fix formatting --- pkg/core/timer.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pkg/core/timer.go b/pkg/core/timer.go index 3852c239..3f455bb3 100644 --- a/pkg/core/timer.go +++ b/pkg/core/timer.go @@ -38,7 +38,7 @@ func Timer(opts TimerOptions) (<-chan Update, error) { err error ) - now := time.Now().UTC() + now := time.Now().UTC() // parse the options to determine our delays if opts.Cron != "" { // calculate delay until next cron moment as defined From f6be13cc0030e640b6f4e0fe70293caf80df44de Mon Sep 17 00:00:00 2001 From: Avi Deitcher Date: Tue, 12 Mar 2024 16:34:25 +0200 Subject: [PATCH 5/8] enable retention Signed-off-by: Avi Deitcher --- README.md | 3 ++- docs/configuration.md | 38 +++++++++++--------------- docs/prune.md | 63 +++++++++++++++++++++++++++++++++++++++++++ 3 files changed, 80 insertions(+), 24 deletions(-) create mode 100644 docs/prune.md diff --git a/README.md b/README.md index f6b564b1..c16c1884 100644 --- a/README.md +++ b/README.md @@ -4,7 +4,7 @@ Back up mysql databases to... anywhere! ## Overview -mysql-backup is a simple way to do MySQL database backups and restores. +mysql-backup is a simple way to do MySQL database backups and restores, as well as manage your backups. It has the following features: @@ -14,6 +14,7 @@ It has the following features: * connect to any container running on the same system * select how often to run a dump * select when to start the first dump, whether time of day or relative to container start time +* prune backups older than a specific time period or quantity Please see [CONTRIBUTORS.md](./CONTRIBUTORS.md) for a list of contributors. 
diff --git a/docs/configuration.md b/docs/configuration.md index 69548c69..d7a4451a 100644 --- a/docs/configuration.md +++ b/docs/configuration.md @@ -58,7 +58,7 @@ Various sample configuration files are available in the [sample-configs](../samp ## Configuration Options -The following are the environment variables, CLI flags and configuration file options for a backup or a restore. +The following are the environment variables, CLI flags and configuration file options for: backup(B), restore (R), prune (P). | Purpose | Backup / Restore | CLI Flag | Env Var | Config Key | Default | | --- | --- | --- | --- | --- | --- | @@ -70,21 +70,21 @@ The following are the environment variables, CLI flags and configuration file op | names of databases to exclude from the dump | B | `exclude` | `DB_NAMES_EXCLUDE` | `database.exclude` | | | do not include `USE ;` statement in the dump | B | `no-database-name` | `NO_DATABASE_NAME` | `database.no-database-name` | `false` | | restore to a specific database | R | `restore --database` | `RESTORE_DATABASE` | `restore.database` | | -| how often to do a dump, in minutes | B | `dump --frequency` | `DB_DUMP_FREQ` | `dump.schedule.frequency` | `1440` (in minutes), i.e. once per day | -| what time to do the first dump | B | `dump --begin` | `DB_DUMP_BEGIN` | `dump.schedule.begin` | `0`, i.e. immediately | -| cron schedule for dumps | B | `dump --cron` | `DB_DUMP_CRON` | `dump.schedule.cron` | | -| run the backup a single time and exit | B | `dump --once` | `RUN_ONCE` | `dump.schedule.once` | `false` | -| enable debug logging | BR | `debug` | `DEBUG` | `logging: debug` | `false` | -| where to put the dump file; see [backup](./backup.md) | B | `dump --target` | `DB_DUMP_TARGET` | `dump.targets` | | +| how often to do a dump or prune, in minutes | BP | `dump --frequency` | `DB_DUMP_FREQ` | `dump.schedule.frequency` | `1440` (in minutes), i.e. 
once per day | +| what time to do the first dump or prune | BP | `dump --begin` | `DB_DUMP_BEGIN` | `dump.schedule.begin` | `0`, i.e. immediately | +| cron schedule for dumps or prunes | BP | `dump --cron` | `DB_DUMP_CRON` | `dump.schedule.cron` | | +| run the backup or prune a single time and exit | BP | `dump --once` | `RUN_ONCE` | `dump.schedule.once` | `false` | +| enable debug logging | BRP | `debug` | `DEBUG` | `logging: debug` | `false` | +| where to put the dump file; see [backup](./backup.md) | BP | `dump --target` | `DB_DUMP_TARGET` | `dump.targets` | | | where the restore file exists; see [restore](./restore.md) | R | `restore --target` | `DB_RESTORE_TARGET` | `restore.target` | | -| replace any `:` in the dump filename with `-` | B | `dump --safechars` | `DB_DUMP_SAFECHARS` | `database.safechars` | `false` | -| AWS access key ID, used only if a target does not have one | BR | `aws-access-key-id` | `AWS_ACCESS_KEY_ID` | `dump.targets[s3-target].credentials.access-key-id` | | -| AWS secret access key, used only if a target does not have one | BR | `aws-secret-access-key` | `AWS_SECRET_ACCESS_KEY` | `dump.targets[s3-target].credentials.secret-access-key` | | -| AWS default region, used only if a target does not have one | BR | `aws-region` | `AWS_REGION` | `dump.targets[s3-target].region` | | +| replace any `:` in the dump filename with `-` | BP | `dump --safechars` | `DB_DUMP_SAFECHARS` | `database.safechars` | `false` | +| AWS access key ID, used only if a target does not have one | BRP | `aws-access-key-id` | `AWS_ACCESS_KEY_ID` | `dump.targets[s3-target].credentials.access-key-id` | | +| AWS secret access key, used only if a target does not have one | BRP | `aws-secret-access-key` | `AWS_SECRET_ACCESS_KEY` | `dump.targets[s3-target].credentials.secret-access-key` | | +| AWS default region, used only if a target does not have one | BRP | `aws-region` | `AWS_REGION` | `dump.targets[s3-target].region` | | | alternative endpoint URL for S3-interoperable 
systems, used only if a target does not have one | BR | `aws-endpoint-url` | `AWS_ENDPOINT_URL` | `dump.targets[s3-target].endpoint` | | -| SMB username, used only if a target does not have one | BR | `smb-user` | `SMB_USER` | `dump.targets[smb-target].credentials.username` | | -| SMB password, used only if a target does not have one | BR | `smb-pass` | `SMB_PASS` | `dump.targets[smb-target].credentials.password` | | -| compression to use, one of: `bzip2`, `gzip` | B | `compression` | `COMPRESSION` | `dump.compression` | `gzip` | +| SMB username, used only if a target does not have one | BRP | `smb-user` | `SMB_USER` | `dump.targets[smb-target].credentials.username` | | +| SMB password, used only if a target does not have one | BRP | `smb-pass` | `SMB_PASS` | `dump.targets[smb-target].credentials.password` | | +| compression to use, one of: `bzip2`, `gzip` | BP | `compression` | `COMPRESSION` | `dump.compression` | `gzip` | | when in container, run the dump or restore with `nice`/`ionice` | BR | `` | `NICE` | `` | `false` | | tmp directory to be used during backup creation and other operations | BR | `tmp` | `TMP_PATH` | `tmp` | system-defined | | filename to save the target backup file | B | `dump --filename-pattern` | `DB_DUMP_FILENAME_PATTERN` | `dump.filename-pattern` | | @@ -92,12 +92,4 @@ The following are the environment variables, CLI flags and configuration file op | directory with scripts to execute after backup | B | `dump --post-backup-scripts` | `DB_DUMP_POST_BACKUP_SCRIPTS` | `dump.scripts.post-backup` | in container, `/scripts.d/post-backup/` | | directory with scripts to execute before restore | R | `restore --pre-restore-scripts` | `DB_DUMP_PRE_RESTORE_SCRIPTS` | `dump.pre-restore-scripts` | in container, `/scripts.d/pre-restore/` | | directory with scripts to execute after restore | R | `restore --post-restore-scripts` | `DB_DUMP_POST_RESTORE_SCRIPTS` | `dump.post-restore-scripts` | in container, `/scripts.d/post-restore/` | - - -## Unsupported 
Options - -Unsupported options from the old version of `mysql-backup`: - -* `MYSQLDUMP_OPTS`: A string of options to pass to `mysqldump`, e.g. `MYSQLDUMP_OPTS="--opt abc --param def --max_allowed_packet=123455678"` will run `mysqldump --opt abc --param def --max_allowed_packet=123455678`. These are replaced by individual options. -* `AWS_CLI_OPTS`: Additional arguments to be passed to the `aws` part of the `aws s3 cp` command, click [here](https://docs.aws.amazon.com/cli/latest/reference/#options) for a list. These are replaced by target-specific options. -* `AWS_CLI_S3_CP_OPTS`: Additional arguments to be passed to the `s3 cp` part of the `aws s3 cp` command, click [here](https://docs.aws.amazon.com/cli/latest/reference/s3/cp.html#options) for a list. If you are using AWS KMS, `sse`, `sse-kms-key-id`, etc., may be of interest. These are replaced by target-specific options +| retention policy for backups | BP | `dump --retention` | `RETENTION` | `prune.retention` | Infinite | diff --git a/docs/prune.md b/docs/prune.md new file mode 100644 index 00000000..87c74067 --- /dev/null +++ b/docs/prune.md @@ -0,0 +1,63 @@ +# Pruning + +Pruning is the process of removing backups that no longer are needed. + +mysql-backup does **not** do this by default; it is up to you to enable this feature, if you want it. + +## Launching Pruning + +Pruning happens only in the following scenarios: + +* During pruning runs +* During backup runs + +It does not occur during restore runs. + +### Pruning Runs + +You can start `mysql-backup` with the command `prune` to run a pruning operation. It will prune any backups that are no longer needed. + +Like backups, it can run once and then exit, or it can run on a schedule. + +It uses the same configuration options for scheduling as backups, see the [scheduling](./scheduling.md) documentation for more information, +specifically the section about [Scheduling Options](./scheduling.md#scheduling-options). 
+
+### Backup Runs
+
+When running `mysql-backup` in backup mode, it _optionally_ can also prune older backups before each backup run.
+When enabled, it will prune any backups that fit the pruning criteria.
+
+## Pruning Criteria
+
+Pruning can be on the basis of the _age_ of a specific backup, or the _number_ of backups. Both are set by the configuration setting:
+
+* Environment variable: `RETENTION=`
+* CLI flag: `dump --retention=` or `prune --retention=`
+* Config file:
+```yaml
+prune:
+  retention:
+```
+
+The value of retention is always an integer followed by a letter. The letter can be one of:
+
+* `h` - hours, e.g. `2h`
+* `d` - days, e.g. `3d`
+* `w` - weeks, e.g. `4w`
+* `m` - months, e.g. `3m`
+* `y` - years, e.g. `5y`
+* `c` - count, how many backups to keep, e.g. `10c`; this could have been simply `10`, but was kept as `c` to avoid accidental confusion with the other options.
+
+Most of these are interchangeable, e.g. `3d` is the same as `72h`, and `4w` is the same as `28d` is the same as `672h`.
+
+When calculating whether or not to prune, `mysql-backup` __always__ converts the amount to hours, and then errs on the side of caution.
+For example, if provided `7d`, it will convert that to `168h`, and then prune any backups older than 168 full hours. If it is 167 hours and 59 minutes old, it
+will not be pruned.
+
+## Determining backup age
+
+Pruning depends on the name of the backup file, rather than the timestamp on the target filesystem, as the latter can be unreliable.
+This means that the filename must be of a known pattern.
+
+As of this writing, pruning only works for backup files whose filename uses the default naming scheme, as described in
+["Dump File" in backup documentation](./backup.md#dump-file). We hope to support custom filenames in the future.
From 8a3681340bd72fc3d941c0233e3aa44e49291153 Mon Sep 17 00:00:00 2001 From: Avi Deitcher Date: Wed, 13 Mar 2024 12:27:47 +0200 Subject: [PATCH 6/8] implement prune command Signed-off-by: Avi Deitcher --- cmd/common_test.go | 17 +++- cmd/dump.go | 94 ++++++++++++-------- cmd/dump_test.go | 67 ++++++++++----- cmd/prune.go | 139 ++++++++++++++++++++++++++++++ cmd/prune_test.go | 60 +++++++++++++ cmd/restore.go | 17 ++-- cmd/restore_test.go | 3 +- cmd/root.go | 55 ++++++------ cmd/testdata/config.yml | 9 +- pkg/config/type.go | 5 ++ pkg/core/dump.go | 19 ----- pkg/core/prune.go | 180 +++++++++++++++++++++++++++++++++++++++ pkg/core/prune_test.go | 132 ++++++++++++++++++++++++++++ pkg/core/pruneoptions.go | 13 +++ pkg/core/timer.go | 21 +++++ pkg/storage/file/file.go | 22 +++++ pkg/storage/s3/s3.go | 66 ++++++++++++++ pkg/storage/smb/smb.go | 102 ++++++++++++++-------- pkg/storage/storage.go | 5 ++ test/backup_test.go | 4 +- 20 files changed, 879 insertions(+), 151 deletions(-) create mode 100644 cmd/prune.go create mode 100644 cmd/prune_test.go create mode 100644 pkg/core/prune.go create mode 100644 pkg/core/prune_test.go create mode 100644 pkg/core/pruneoptions.go diff --git a/cmd/common_test.go b/cmd/common_test.go index 425cfd2f..ec39025f 100644 --- a/cmd/common_test.go +++ b/cmd/common_test.go @@ -17,8 +17,8 @@ func newMockExecs() *mockExecs { return m } -func (m *mockExecs) timerDump(opts core.DumpOptions, timerOpts core.TimerOptions) error { - args := m.Called(opts, timerOpts) +func (m *mockExecs) dump(opts core.DumpOptions) error { + args := m.Called(opts) return args.Error(0) } @@ -26,3 +26,16 @@ func (m *mockExecs) restore(target storage.Storage, targetFile string, dbconn da args := m.Called(target, targetFile, dbconn, databasesMap, compressor) return args.Error(0) } + +func (m *mockExecs) prune(opts core.PruneOptions) error { + args := m.Called(opts) + return args.Error(0) +} +func (m *mockExecs) timer(timerOpts core.TimerOptions, cmd func() error) error { + 
args := m.Called(timerOpts) + err := args.Error(0) + if err != nil { + return err + } + return cmd() +} diff --git a/cmd/dump.go b/cmd/dump.go index 828c18d7..856d5db0 100644 --- a/cmd/dump.go +++ b/cmd/dump.go @@ -20,7 +20,10 @@ const ( defaultMaxAllowedPacket = 4194304 ) -func dumpCmd(execs execs) (*cobra.Command, error) { +func dumpCmd(execs execs, cmdConfig *cmdConfiguration) (*cobra.Command, error) { + if cmdConfig == nil { + return nil, fmt.Errorf("cmdConfig is nil") + } var v *viper.Viper var cmd = &cobra.Command{ Use: "dump", @@ -43,7 +46,7 @@ func dumpCmd(execs execs) (*cobra.Command, error) { ) if len(targetURLs) > 0 { for _, t := range targetURLs { - store, err := storage.ParseURL(t, creds) + store, err := storage.ParseURL(t, cmdConfig.creds) if err != nil { return fmt.Errorf("invalid target url: %v", err) } @@ -51,10 +54,10 @@ func dumpCmd(execs execs) (*cobra.Command, error) { } } else { // try the config file - if configuration != nil { + if cmdConfig.configuration != nil { // parse the target objects, then the ones listed for the backup - targetStructures := configuration.Targets - dumpTargets := configuration.Dump.Targets + targetStructures := cmdConfig.configuration.Targets + dumpTargets := cmdConfig.configuration.Dump.Targets for _, t := range dumpTargets { var store storage.Storage if target, ok := targetStructures[t]; !ok { @@ -73,40 +76,40 @@ func dumpCmd(execs execs) (*cobra.Command, error) { return fmt.Errorf("no targets specified") } safechars := v.GetBool("safechars") - if !v.IsSet("safechars") && configuration != nil { - safechars = configuration.Dump.Safechars + if !v.IsSet("safechars") && cmdConfig.configuration != nil { + safechars = cmdConfig.configuration.Dump.Safechars } include := v.GetStringSlice("include") - if len(include) == 0 && configuration != nil { - include = configuration.Dump.Include + if len(include) == 0 && cmdConfig.configuration != nil { + include = cmdConfig.configuration.Dump.Include } // make this slice nil if it's 
empty, so it is consistent; used mainly for test consistency if len(include) == 0 { include = nil } exclude := v.GetStringSlice("exclude") - if len(exclude) == 0 && configuration != nil { - exclude = configuration.Dump.Exclude + if len(exclude) == 0 && cmdConfig.configuration != nil { + exclude = cmdConfig.configuration.Dump.Exclude } // make this slice nil if it's empty, so it is consistent; used mainly for test consistency if len(exclude) == 0 { exclude = nil } preBackupScripts := v.GetString("pre-backup-scripts") - if preBackupScripts == "" && configuration != nil { - preBackupScripts = configuration.Dump.Scripts.PreBackup + if preBackupScripts == "" && cmdConfig.configuration != nil { + preBackupScripts = cmdConfig.configuration.Dump.Scripts.PreBackup } noDatabaseName := v.GetBool("no-database-name") - if !v.IsSet("no-database-name") && configuration != nil { - noDatabaseName = configuration.Dump.NoDatabaseName + if !v.IsSet("no-database-name") && cmdConfig.configuration != nil { + noDatabaseName = cmdConfig.configuration.Dump.NoDatabaseName } compact := v.GetBool("compact") - if !v.IsSet("compact") && configuration != nil { - compact = configuration.Dump.Compact + if !v.IsSet("compact") && cmdConfig.configuration != nil { + compact = cmdConfig.configuration.Dump.Compact } maxAllowedPacket := v.GetInt("max-allowed-packet") - if !v.IsSet("max-allowed-packet") && configuration != nil && configuration.Dump.MaxAllowedPacket != 0 { - maxAllowedPacket = configuration.Dump.MaxAllowedPacket + if !v.IsSet("max-allowed-packet") && cmdConfig.configuration != nil && cmdConfig.configuration.Dump.MaxAllowedPacket != 0 { + maxAllowedPacket = cmdConfig.configuration.Dump.MaxAllowedPacket } // compression algorithm: check config, then CLI/env var overrides @@ -114,8 +117,8 @@ func dumpCmd(execs execs) (*cobra.Command, error) { compressionAlgo string compressor compression.Compressor ) - if configuration != nil { - compressionAlgo = configuration.Dump.Compression + if 
cmdConfig.configuration != nil { + compressionAlgo = cmdConfig.configuration.Dump.Compression } compressionVar := v.GetString("compression") if compressionVar != "" { @@ -131,7 +134,7 @@ func dumpCmd(execs execs) (*cobra.Command, error) { Targets: targets, Safechars: safechars, DBNames: include, - DBConn: dbconn, + DBConn: cmdConfig.dbconn, Compressor: compressor, Exclude: exclude, PreBackupScripts: preBackupScripts, @@ -141,22 +144,28 @@ func dumpCmd(execs execs) (*cobra.Command, error) { MaxAllowedPacket: maxAllowedPacket, } + // retention, if enabled + retention := v.GetString("retention") + if retention == "" && cmdConfig.configuration != nil { + retention = cmdConfig.configuration.Prune.Retention + } + // timer options once := v.GetBool("once") - if !v.IsSet("once") && configuration != nil { - once = configuration.Dump.Schedule.Once + if !v.IsSet("once") && cmdConfig.configuration != nil { + once = cmdConfig.configuration.Dump.Schedule.Once } cron := v.GetString("cron") - if cron == "" && configuration != nil { - cron = configuration.Dump.Schedule.Cron + if cron == "" && cmdConfig.configuration != nil { + cron = cmdConfig.configuration.Dump.Schedule.Cron } begin := v.GetString("begin") - if begin == "" && configuration != nil { - begin = configuration.Dump.Schedule.Begin + if begin == "" && cmdConfig.configuration != nil { + begin = cmdConfig.configuration.Dump.Schedule.Begin } frequency := v.GetInt("frequency") - if frequency == 0 && configuration != nil { - frequency = configuration.Dump.Schedule.Frequency + if frequency == 0 && cmdConfig.configuration != nil { + frequency = cmdConfig.configuration.Dump.Schedule.Frequency } timerOpts := core.TimerOptions{ Once: once, @@ -164,12 +173,27 @@ func dumpCmd(execs execs) (*cobra.Command, error) { Begin: begin, Frequency: frequency, } - dump := core.TimerDump + dump := core.Dump + prune := core.Prune + timer := core.TimerCommand if execs != nil { - dump = execs.timerDump + dump = execs.dump + prune = execs.prune + 
timer = execs.timer } - if err := dump(dumpOpts, timerOpts); err != nil { - return err + if err := timer(timerOpts, func() error { + err := dump(dumpOpts) + if err != nil { + return fmt.Errorf("error running dump: %w", err) + } + if retention != "" { + if err := prune(core.PruneOptions{Targets: targets, Retention: retention}); err != nil { + return fmt.Errorf("error running prune: %w", err) + } + } + return nil + }); err != nil { + return fmt.Errorf("error running command: %w", err) } log.Info("Backup complete") return nil @@ -232,6 +256,8 @@ S3: If it is a URL of the format s3://bucketname/path then it will connect via S cmd.MarkFlagsMutuallyExclusive("once", "frequency") cmd.MarkFlagsMutuallyExclusive("cron", "begin") cmd.MarkFlagsMutuallyExclusive("cron", "frequency") + // retention + flags.String("retention", "", "Retention period for backups. Optional. If not specified, no pruning will be done. Can be number of backups or time-based. For time-based, the format is: 1d, 1w, 1m, 1y for days, weeks, months, years, respectively. For number-based, the format is: 1c, 2c, 3c, etc. 
for the count of backups to keep.") return cmd, nil } diff --git a/cmd/dump_test.go b/cmd/dump_test.go index 66d96fcc..b7b2ef13 100644 --- a/cmd/dump_test.go +++ b/cmd/dump_test.go @@ -25,63 +25,80 @@ func TestDumpCmd(t *testing.T) { wantErr bool expectedDumpOptions core.DumpOptions expectedTimerOptions core.TimerOptions + expectedPruneOptions *core.PruneOptions }{ - {"missing server and target options", []string{""}, "", true, core.DumpOptions{}, core.TimerOptions{}}, - {"invalid target URL", []string{"--server", "abc", "--target", "def"}, "", true, core.DumpOptions{DBConn: database.Connection{Host: "abc"}}, core.TimerOptions{}}, + // invalid ones + {"missing server and target options", []string{""}, "", true, core.DumpOptions{}, core.TimerOptions{}, nil}, + {"invalid target URL", []string{"--server", "abc", "--target", "def"}, "", true, core.DumpOptions{DBConn: database.Connection{Host: "abc"}}, core.TimerOptions{}, nil}, + + // file URL {"file URL", []string{"--server", "abc", "--target", "file:///foo/bar"}, "", false, core.DumpOptions{ Targets: []storage.Storage{file.New(*fileTargetURL)}, MaxAllowedPacket: defaultMaxAllowedPacket, Compressor: &compression.GzipCompressor{}, DBConn: database.Connection{Host: "abc"}, - }, core.TimerOptions{Frequency: defaultFrequency, Begin: defaultBegin}}, + }, core.TimerOptions{Frequency: defaultFrequency, Begin: defaultBegin}, nil}, + {"file URL with prune", []string{"--server", "abc", "--target", "file:///foo/bar", "--retention", "1h"}, "", false, core.DumpOptions{ + Targets: []storage.Storage{file.New(*fileTargetURL)}, + MaxAllowedPacket: defaultMaxAllowedPacket, + Compressor: &compression.GzipCompressor{}, + DBConn: database.Connection{Host: "abc"}, + }, core.TimerOptions{Frequency: defaultFrequency, Begin: defaultBegin}, &core.PruneOptions{Targets: []storage.Storage{file.New(*fileTargetURL)}, Retention: "1h"}}, + + // config file + {"config file", []string{"--config-file", "testdata/config.yml"}, "", false, 
core.DumpOptions{ + Targets: []storage.Storage{file.New(*fileTargetURL)}, + MaxAllowedPacket: defaultMaxAllowedPacket, + Compressor: &compression.GzipCompressor{}, + DBConn: database.Connection{Host: "abcd", Port: 3306, User: "user2", Pass: "xxxx2"}, + }, core.TimerOptions{Frequency: defaultFrequency, Begin: defaultBegin}, &core.PruneOptions{Targets: []storage.Storage{file.New(*fileTargetURL)}, Retention: "1h"}}, + + // timer options {"once flag", []string{"--server", "abc", "--target", "file:///foo/bar", "--once"}, "", false, core.DumpOptions{ Targets: []storage.Storage{file.New(*fileTargetURL)}, MaxAllowedPacket: defaultMaxAllowedPacket, Compressor: &compression.GzipCompressor{}, DBConn: database.Connection{Host: "abc"}, - }, core.TimerOptions{Frequency: defaultFrequency, Begin: defaultBegin, Once: true}}, + }, core.TimerOptions{Once: true, Frequency: defaultFrequency, Begin: defaultBegin}, nil}, {"cron flag", []string{"--server", "abc", "--target", "file:///foo/bar", "--cron", "0 0 * * *"}, "", false, core.DumpOptions{ Targets: []storage.Storage{file.New(*fileTargetURL)}, MaxAllowedPacket: defaultMaxAllowedPacket, Compressor: &compression.GzipCompressor{}, DBConn: database.Connection{Host: "abc"}, - }, core.TimerOptions{Frequency: defaultFrequency, Begin: defaultBegin, Cron: "0 0 * * *"}}, + }, core.TimerOptions{Frequency: defaultFrequency, Begin: defaultBegin, Cron: "0 0 * * *"}, nil}, {"begin flag", []string{"--server", "abc", "--target", "file:///foo/bar", "--begin", "1234"}, "", false, core.DumpOptions{ Targets: []storage.Storage{file.New(*fileTargetURL)}, MaxAllowedPacket: defaultMaxAllowedPacket, Compressor: &compression.GzipCompressor{}, DBConn: database.Connection{Host: "abc"}, - }, core.TimerOptions{Frequency: defaultFrequency, Begin: "1234"}}, + }, core.TimerOptions{Frequency: defaultFrequency, Begin: "1234"}, nil}, {"frequency flag", []string{"--server", "abc", "--target", "file:///foo/bar", "--frequency", "10"}, "", false, core.DumpOptions{ Targets: 
[]storage.Storage{file.New(*fileTargetURL)}, MaxAllowedPacket: defaultMaxAllowedPacket, Compressor: &compression.GzipCompressor{}, DBConn: database.Connection{Host: "abc"}, - }, core.TimerOptions{Frequency: 10, Begin: defaultBegin}}, - {"config file", []string{"--config-file", "testdata/config.yml"}, "", false, core.DumpOptions{ - Targets: []storage.Storage{file.New(*fileTargetURL)}, - MaxAllowedPacket: defaultMaxAllowedPacket, - Compressor: &compression.GzipCompressor{}, - DBConn: database.Connection{Host: "abc", Port: 3306, User: "user", Pass: "xxxx"}, - }, core.TimerOptions{Frequency: defaultFrequency, Begin: defaultBegin}}, - {"incompatible flags: once/cron", []string{"--server", "abc", "--target", "file:///foo/bar", "--once", "--cron", "0 0 * * *"}, "", true, core.DumpOptions{}, core.TimerOptions{}}, - {"incompatible flags: once/begin", []string{"--server", "abc", "--target", "file:///foo/bar", "--once", "--begin", "1234"}, "", true, core.DumpOptions{}, core.TimerOptions{}}, - {"incompatible flags: once/frequency", []string{"--server", "abc", "--target", "file:///foo/bar", "--once", "--frequency", "10"}, "", true, core.DumpOptions{}, core.TimerOptions{}}, - {"incompatible flags: cron/begin", []string{"--server", "abc", "--target", "file:///foo/bar", "--cron", "0 0 * * *", "--begin", "1234"}, "", true, core.DumpOptions{}, core.TimerOptions{}}, - {"incompatible flags: cron/frequency", []string{"--server", "abc", "--target", "file:///foo/bar", "--cron", "0 0 * * *", "--frequency", "10"}, "", true, core.DumpOptions{}, core.TimerOptions{}}, + }, core.TimerOptions{Frequency: 10, Begin: defaultBegin}, nil}, + {"incompatible flags: once/cron", []string{"--server", "abc", "--target", "file:///foo/bar", "--once", "--cron", "0 0 * * *"}, "", true, core.DumpOptions{}, core.TimerOptions{}, nil}, + {"incompatible flags: once/begin", []string{"--server", "abc", "--target", "file:///foo/bar", "--once", "--begin", "1234"}, "", true, core.DumpOptions{}, core.TimerOptions{}, 
nil}, + {"incompatible flags: once/frequency", []string{"--server", "abc", "--target", "file:///foo/bar", "--once", "--frequency", "10"}, "", true, core.DumpOptions{}, core.TimerOptions{}, nil}, + {"incompatible flags: cron/begin", []string{"--server", "abc", "--target", "file:///foo/bar", "--cron", "0 0 * * *", "--begin", "1234"}, "", true, core.DumpOptions{}, core.TimerOptions{}, nil}, + {"incompatible flags: cron/frequency", []string{"--server", "abc", "--target", "file:///foo/bar", "--cron", "0 0 * * *", "--frequency", "10"}, "", true, core.DumpOptions{ + DBConn: database.Connection{Host: "abcd", Port: 3306, User: "user2", Pass: "xxxx2"}, + }, core.TimerOptions{Frequency: defaultFrequency, Begin: defaultBegin}, &core.PruneOptions{Targets: []storage.Storage{file.New(*fileTargetURL)}, Retention: "1h"}}, } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { m := newMockExecs() - m.On("timerDump", mock.MatchedBy(func(dumpOpts core.DumpOptions) bool { + m.On("dump", mock.MatchedBy(func(dumpOpts core.DumpOptions) bool { diff := deep.Equal(dumpOpts, tt.expectedDumpOptions) if diff == nil { return true } t.Errorf("dumpOpts compare failed: %v", diff) return false - }), mock.MatchedBy(func(timerOpts core.TimerOptions) bool { + })).Return(nil) + m.On("timer", mock.MatchedBy(func(timerOpts core.TimerOptions) bool { diff := deep.Equal(timerOpts, tt.expectedTimerOptions) if diff == nil { return true @@ -89,6 +106,16 @@ func TestDumpCmd(t *testing.T) { t.Errorf("timerOpts compare failed: %v", diff) return false })).Return(nil) + if tt.expectedPruneOptions != nil { + m.On("prune", mock.MatchedBy(func(pruneOpts core.PruneOptions) bool { + diff := deep.Equal(pruneOpts, *tt.expectedPruneOptions) + if diff == nil { + return true + } + t.Errorf("pruneOpts compare failed: %v", diff) + return false + })).Return(nil) + } cmd, err := rootCmd(m) if err != nil { diff --git a/cmd/prune.go b/cmd/prune.go new file mode 100644 index 00000000..9e77939b --- /dev/null +++ 
b/cmd/prune.go @@ -0,0 +1,139 @@ +package cmd + +import ( + "fmt" + "strings" + + log "github.com/sirupsen/logrus" + "github.com/spf13/cobra" + "github.com/spf13/viper" + + "github.com/databacker/mysql-backup/pkg/core" + "github.com/databacker/mysql-backup/pkg/storage" +) + +func pruneCmd(execs execs, cmdConfig *cmdConfiguration) (*cobra.Command, error) { + if cmdConfig == nil { + return nil, fmt.Errorf("cmdConfig is nil") + } + var v *viper.Viper + var cmd = &cobra.Command{ + Use: "prune", + Short: "prune older backups", + Long: `Prune older backups based on a retention period. Can be number of backups or time-based. + For time-based, the format is: 1d, 1w, 1m, 1y for days, weeks, months, years, respectively. + For number-based, the format is: 1c, 2c, 3c, etc. for the count of backups to keep. + + For time-based, prune always converts the time to hours, and then rounds up. This means that 2d is treated as 48h, and + any backups must be at least 48 full hours ago to be pruned. + `, + PreRun: func(cmd *cobra.Command, args []string) { + bindFlags(cmd, v) + }, + RunE: func(cmd *cobra.Command, args []string) error { + log.Debug("starting prune") + retention := v.GetString("retention") + targetURLs := v.GetStringSlice("target") + var ( + targets []storage.Storage + err error + ) + + if len(targetURLs) > 0 { + for _, t := range targetURLs { + store, err := storage.ParseURL(t, cmdConfig.creds) + if err != nil { + return fmt.Errorf("invalid target url: %v", err) + } + targets = append(targets, store) + } + } else { + // try the config file + if cmdConfig.configuration != nil { + // parse the target objects, then the ones listed for the backup + targetStructures := cmdConfig.configuration.Targets + dumpTargets := cmdConfig.configuration.Dump.Targets + for _, t := range dumpTargets { + var store storage.Storage + if target, ok := targetStructures[t]; !ok { + return fmt.Errorf("target %s from dump configuration not found in targets configuration", t) + } else { + store, err = 
target.Storage.Storage() + if err != nil { + return fmt.Errorf("target %s from dump configuration has invalid URL: %v", t, err) + } + } + targets = append(targets, store) + } + } + } + + if retention == "" && cmdConfig.configuration != nil { + retention = cmdConfig.configuration.Prune.Retention + } + + // timer options + once := v.GetBool("once") + if !v.IsSet("once") && cmdConfig.configuration != nil { + once = cmdConfig.configuration.Dump.Schedule.Once + } + cron := v.GetString("cron") + if cron == "" && cmdConfig.configuration != nil { + cron = cmdConfig.configuration.Dump.Schedule.Cron + } + begin := v.GetString("begin") + if begin == "" && cmdConfig.configuration != nil { + begin = cmdConfig.configuration.Dump.Schedule.Begin + } + frequency := v.GetInt("frequency") + if frequency == 0 && cmdConfig.configuration != nil { + frequency = cmdConfig.configuration.Dump.Schedule.Frequency + } + timerOpts := core.TimerOptions{ + Once: once, + Cron: cron, + Begin: begin, + Frequency: frequency, + } + + prune := core.Prune + timer := core.TimerCommand + if execs != nil { + prune = execs.prune + timer = execs.timer + } + if err := timer(timerOpts, func() error { + return prune(core.PruneOptions{Targets: targets, Retention: retention}) + }); err != nil { + return fmt.Errorf("error running prune: %w", err) + } + log.Info("Pruning complete") + return nil + }, + } + // target - where the backup is + v = viper.New() + v.SetEnvPrefix("db_restore") + v.SetEnvKeyReplacer(strings.NewReplacer("-", "_")) + v.AutomaticEnv() + + flags := cmd.Flags() + flags.String("target", "", "full URL target to the directory where the backups are stored. Can be a file URL, or a reference to a target in the configuration file, e.g. `config://targetname`.") + + // retention + flags.String("retention", "", "Retention period for backups. REQUIRED. Can be number of backups or time-based. For time-based, the format is: 1d, 1w, 1m, 1y for days, weeks, months, years, respectively. 
For number-based, the format is: 1c, 2c, 3c, etc. for the count of backups to keep.") + + // frequency + flags.Int("frequency", defaultFrequency, "how often to run prunes, in minutes") + + // begin + flags.String("begin", defaultBegin, "What time to do the first prune. Must be in one of two formats: Absolute: HHMM, e.g. `2330` or `0415`; or Relative: +MM, i.e. how many minutes after starting the container, e.g. `+0` (immediate), `+10` (in 10 minutes), or `+90` in an hour and a half") + + // cron + flags.String("cron", "", "Set the prune schedule using standard [crontab syntax](https://en.wikipedia.org/wiki/Cron), a single line.") + + // once + flags.Bool("once", false, "Override all other settings and run the prune once immediately and exit. Useful if you use an external scheduler (e.g. as part of an orchestration solution like Cattle or Docker Swarm or [kubernetes cron jobs](https://kubernetes.io/docs/concepts/workloads/controllers/cron-jobs/)) and don't want the container to do the scheduling internally.") + + return cmd, nil +} diff --git a/cmd/prune_test.go b/cmd/prune_test.go new file mode 100644 index 00000000..99622b31 --- /dev/null +++ b/cmd/prune_test.go @@ -0,0 +1,60 @@ +package cmd + +import ( + "net/url" + "testing" + + "github.com/databacker/mysql-backup/pkg/core" + "github.com/databacker/mysql-backup/pkg/storage" + "github.com/databacker/mysql-backup/pkg/storage/file" + "github.com/go-test/deep" + "github.com/stretchr/testify/mock" +) + +func TestPruneCmd(t *testing.T) { + t.Parallel() + fileTarget := "file:///foo/bar" + fileTargetURL, _ := url.Parse(fileTarget) + + tests := []struct { + name string + args []string // "dump" will be prepended automatically + config string + wantErr bool + expectedPruneOptions core.PruneOptions + expectedTimerOptions core.TimerOptions + }{ + {"invalid target URL", []string{"--target", "def"}, "", true, core.PruneOptions{}, core.TimerOptions{}}, + {"file URL", []string{"--target", fileTarget, "--retention", "1h"}, "", 
false, core.PruneOptions{Targets: []storage.Storage{file.New(*fileTargetURL)}, Retention: "1h"}, core.TimerOptions{Frequency: defaultFrequency, Begin: defaultBegin}}, + {"config file", []string{"--config-file", "testdata/config.yml"}, "", false, core.PruneOptions{Targets: []storage.Storage{file.New(*fileTargetURL)}, Retention: "1h"}, core.TimerOptions{Frequency: defaultFrequency, Begin: defaultBegin}}, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + m := newMockExecs() + m.On("prune", mock.MatchedBy(func(pruneOpts core.PruneOptions) bool { + diff := deep.Equal(pruneOpts, tt.expectedPruneOptions) + if diff == nil { + return true + } + t.Errorf("pruneOpts compare failed: %v", diff) + return false + })).Return(nil) + m.On("timer", tt.expectedTimerOptions).Return(nil) + cmd, err := rootCmd(m) + if err != nil { + t.Fatal(err) + } + cmd.SetArgs(append([]string{"prune"}, tt.args...)) + err = cmd.Execute() + switch { + case err == nil && tt.wantErr: + t.Fatal("missing error") + case err != nil && !tt.wantErr: + t.Fatal(err) + case err == nil: + m.AssertExpectations(t) + } + }) + } +} diff --git a/cmd/restore.go b/cmd/restore.go index 8d52275b..12c1b14a 100644 --- a/cmd/restore.go +++ b/cmd/restore.go @@ -14,7 +14,10 @@ import ( "github.com/databacker/mysql-backup/pkg/util" ) -func restoreCmd(execs execs) (*cobra.Command, error) { +func restoreCmd(execs execs, cmdConfig *cmdConfiguration) (*cobra.Command, error) { + if cmdConfig == nil { + return nil, fmt.Errorf("cmdConfig is nil") + } var v *viper.Viper var cmd = &cobra.Command{ Use: "restore", @@ -47,8 +50,8 @@ func restoreCmd(execs execs) (*cobra.Command, error) { compressor compression.Compressor err error ) - if configuration != nil { - compressionAlgo = configuration.Dump.Compression + if cmdConfig.configuration != nil { + compressionAlgo = cmdConfig.configuration.Dump.Compression } compressionVar := v.GetString("compression") if compressionVar != "" { @@ -73,10 +76,10 @@ func 
restoreCmd(execs execs) (*cobra.Command, error) { // get the target name targetName := u.Host // get the target from the config file - if configuration == nil { + if cmdConfig.configuration == nil { return fmt.Errorf("no configuration file found") } - if target, ok := configuration.Targets[targetName]; !ok { + if target, ok := cmdConfig.configuration.Targets[targetName]; !ok { return fmt.Errorf("target %s not found in configuration", targetName) } else { if store, err = target.Storage.Storage(); err != nil { @@ -86,7 +89,7 @@ func restoreCmd(execs execs) (*cobra.Command, error) { // need to add the path to the specific target file } else { // parse the target URL - store, err = storage.ParseURL(target, creds) + store, err = storage.ParseURL(target, cmdConfig.creds) if err != nil { return fmt.Errorf("invalid target url: %v", err) } @@ -95,7 +98,7 @@ func restoreCmd(execs execs) (*cobra.Command, error) { if execs != nil { restore = execs.restore } - if err := restore(store, targetFile, dbconn, databasesMap, compressor); err != nil { + if err := restore(store, targetFile, cmdConfig.dbconn, databasesMap, compressor); err != nil { return fmt.Errorf("error restoring: %v", err) } log.Info("Restore complete") diff --git a/cmd/restore_test.go b/cmd/restore_test.go index f89049e8..98d71733 100644 --- a/cmd/restore_test.go +++ b/cmd/restore_test.go @@ -30,7 +30,7 @@ func TestRestoreCmd(t *testing.T) { {"missing server and target options", []string{""}, "", true, nil, "", database.Connection{}, nil, &compression.GzipCompressor{}}, {"invalid target URL", []string{"--server", "abc", "--target", "def"}, "", true, nil, "", database.Connection{Host: "abc"}, nil, &compression.GzipCompressor{}}, {"valid URL missing dump filename", []string{"--server", "abc", "--target", "file:///foo/bar"}, "", true, nil, "", database.Connection{Host: "abc"}, nil, &compression.GzipCompressor{}}, - {"valid file URL", []string{"--server", "abc", "--target", fileTarget, "filename.tgz"}, "", false, 
file.New(*fileTargetURL), "filename.tgz", database.Connection{Host: "abc"}, map[string]string{}, &compression.GzipCompressor{}}, + {"valid file URL", []string{"--server", "abc", "--target", fileTarget, "filename.tgz", "--verbose", "2"}, "", false, file.New(*fileTargetURL), "filename.tgz", database.Connection{Host: "abc"}, map[string]string{}, &compression.GzipCompressor{}}, } for _, tt := range tests { @@ -50,7 +50,6 @@ func TestRestoreCmd(t *testing.T) { t.Fatal(err) case err == nil: m.AssertExpectations(t) - //m.AssertCalled(t, "restore", tt.expectedTarget, tt.expectedTargetFile, tt.expectedDbconn, tt.expectedDatabasesMap, tt.expectedCompressor) } }) diff --git a/cmd/root.go b/cmd/root.go index fa88bce2..45198616 100644 --- a/cmd/root.go +++ b/cmd/root.go @@ -19,28 +19,31 @@ import ( ) type execs interface { - timerDump(opts core.DumpOptions, timerOpts core.TimerOptions) error + dump(opts core.DumpOptions) error restore(target storage.Storage, targetFile string, dbconn database.Connection, databasesMap map[string]string, compressor compression.Compressor) error + prune(opts core.PruneOptions) error + timer(timerOpts core.TimerOptions, cmd func() error) error } -type subCommand func(execs) (*cobra.Command, error) +type subCommand func(execs, *cmdConfiguration) (*cobra.Command, error) -var subCommands = []subCommand{dumpCmd, restoreCmd} +var subCommands = []subCommand{dumpCmd, restoreCmd, pruneCmd} -const ( - defaultPort = 3306 -) - -var ( +type cmdConfiguration struct { dbconn database.Connection creds credentials.Creds configuration *config.Config +} + +const ( + defaultPort = 3306 ) func rootCmd(execs execs) (*cobra.Command, error) { var ( - v *viper.Viper - cmd *cobra.Command + v *viper.Viper + cmd *cobra.Command + cmdConfig = &cmdConfiguration{} ) cmd = &cobra.Command{ Use: "mysql-backup", @@ -82,48 +85,48 @@ func rootCmd(execs execs) (*cobra.Command, error) { if err := decoder.Decode(&config); err != nil { return fmt.Errorf("fatal error config file: %w", err) 
} - configuration = &config + cmdConfig.configuration = &config } // the structure of our config file is more complex and with relationships than our config/env var // so we cannot use a single viper structure, as described above. // set up database connection - if configuration != nil { - if configuration.Database.Server != "" { - dbconn.Host = configuration.Database.Server + if cmdConfig.configuration != nil { + if cmdConfig.configuration.Database.Server != "" { + cmdConfig.dbconn.Host = cmdConfig.configuration.Database.Server } - if configuration.Database.Port != 0 { - dbconn.Port = configuration.Database.Port + if cmdConfig.configuration.Database.Port != 0 { + cmdConfig.dbconn.Port = cmdConfig.configuration.Database.Port } - if configuration.Database.Credentials.Username != "" { - dbconn.User = configuration.Database.Credentials.Username + if cmdConfig.configuration.Database.Credentials.Username != "" { + cmdConfig.dbconn.User = cmdConfig.configuration.Database.Credentials.Username } - if configuration.Database.Credentials.Password != "" { - dbconn.Pass = configuration.Database.Credentials.Password + if cmdConfig.configuration.Database.Credentials.Password != "" { + cmdConfig.dbconn.Pass = cmdConfig.configuration.Database.Credentials.Password } } // override config with env var or CLI flag, if set dbHost := v.GetString("server") if dbHost != "" && v.IsSet("server") { - dbconn.Host = dbHost + cmdConfig.dbconn.Host = dbHost } dbPort := v.GetInt("port") if dbPort != 0 && v.IsSet("port") { - dbconn.Port = dbPort + cmdConfig.dbconn.Port = dbPort } dbUser := v.GetString("user") if dbUser != "" && v.IsSet("user") { - dbconn.User = dbUser + cmdConfig.dbconn.User = dbUser } dbPass := v.GetString("pass") if dbPass != "" && v.IsSet("pass") { - dbconn.Pass = dbPass + cmdConfig.dbconn.Pass = dbPass } // these are not from the config file, as they are generic credentials, used across all targets. 
// the config file uses specific ones per target - creds = credentials.Creds{ + cmdConfig.creds = credentials.Creds{ AWSEndpoint: v.GetString("aws-endpoint-url"), SMBCredentials: credentials.SMBCreds{ Username: v.GetString("smb-user"), @@ -173,7 +176,7 @@ func rootCmd(execs execs) (*cobra.Command, error) { pflags.String("smb-domain", "", "SMB domain") for _, subCmd := range subCommands { - if sc, err := subCmd(execs); err != nil { + if sc, err := subCmd(execs, cmdConfig); err != nil { return nil, err } else { cmd.AddCommand(sc) diff --git a/cmd/testdata/config.yml b/cmd/testdata/config.yml index 096fe76a..a08d752d 100644 --- a/cmd/testdata/config.yml +++ b/cmd/testdata/config.yml @@ -1,9 +1,9 @@ database: - server: abc + server: abcd port: 3306 credentials: - username: user - password: xxxx + username: user2 + password: xxxx2 targets: local: @@ -16,3 +16,6 @@ targets: dump: targets: - local + +prune: + retention: "1h" \ No newline at end of file diff --git a/pkg/config/type.go b/pkg/config/type.go index 70cdc590..ee21f084 100644 --- a/pkg/config/type.go +++ b/pkg/config/type.go @@ -34,6 +34,7 @@ type Config struct { Restore Restore `yaml:"restore"` Database Database `yaml:"database"` Targets Targets `yaml:"targets"` + Prune Prune `yaml:"prune"` } type Dump struct { @@ -51,6 +52,10 @@ type Dump struct { Targets []string `yaml:"targets"` } +type Prune struct { + Retention string `yaml:"retention"` +} + type Schedule struct { Once bool `yaml:"once"` Cron string `yaml:"cron"` diff --git a/pkg/core/dump.go b/pkg/core/dump.go index 141b63e1..19f25036 100644 --- a/pkg/core/dump.go +++ b/pkg/core/dump.go @@ -19,25 +19,6 @@ const ( targetRenameCmd = "/scripts.d/target.sh" ) -// TimerDump runs a dump on a timer -func TimerDump(opts DumpOptions, timerOpts TimerOptions) error { - c, err := Timer(timerOpts) - if err != nil { - log.Errorf("error creating timer: %v", err) - os.Exit(1) - } - // block and wait for it - for update := range c { - if err := Dump(opts); err != nil { - 
return fmt.Errorf("error backing up: %w", err) - } - if update.Last { - break - } - } - return nil -} - // Dump run a single dump, based on the provided opts func Dump(opts DumpOptions) error { targets := opts.Targets diff --git a/pkg/core/prune.go b/pkg/core/prune.go new file mode 100644 index 00000000..82475c2a --- /dev/null +++ b/pkg/core/prune.go @@ -0,0 +1,180 @@ +package core + +import ( + "errors" + "fmt" + "regexp" + "slices" + "strconv" + "time" + + log "github.com/sirupsen/logrus" +) + +// filenameRE is a regular expression to match a backup filename +var filenameRE = regexp.MustCompile(`^db_backup_(\d{4})-(\d{2})-(\d{2})T(\d{2}):(\d{2}):(\d{2})Z\.\w+$`) + +// Prune prune older backups +func Prune(opts PruneOptions) error { + log.Info("beginning prune") + var ( + candidates []string + now = opts.Now + ) + if now.IsZero() { + now = time.Now() + } + retainHours, err1 := convertToHours(opts.Retention) + retainCount, err2 := convertToCount(opts.Retention) + if err1 != nil && err2 != nil { + return fmt.Errorf("invalid retention string: %s", opts.Retention) + } + if len(opts.Targets) == 0 { + return errors.New("no targets") + } + + for _, target := range opts.Targets { + var pruned int + + log.Debugf("pruning target %s", target) + files, err := target.ReadDir(".") + if err != nil { + return fmt.Errorf("failed to read directory: %v", err) + } + + // create a slice with the filenames and their calculated times - these are *not* the timestamp times, but the times calculated from the filenames + var filesWithTimes []fileWithTime + + for _, fileInfo := range files { + filename := fileInfo.Name() + matches := filenameRE.FindStringSubmatch(filename) + if matches == nil { + log.Debugf("ignoring filename that is not standard backup pattern: %s", filename) + continue + } + log.Debugf("checking filename that is standard backup pattern: %s", filename) + + // Parse the date from the filename + year, month, day, hour, minute, second := matches[1], matches[2], matches[3], 
matches[4], matches[5], matches[6] + dateTimeStr := fmt.Sprintf("%s-%s-%sT%s:%s:%sZ", year, month, day, hour, minute, second) + filetime, err := time.Parse(time.RFC3339, dateTimeStr) + if err != nil { + log.Debugf("Error parsing date from filename %s: %v; ignoring", filename, err) + continue + } + filesWithTimes = append(filesWithTimes, fileWithTime{ + filename: filename, + filetime: filetime, + }) + } + + switch { + case retainHours > 0: + // if we had retainHours, we go through all of the files and find any whose timestamp is older than now-retainHours + for _, f := range filesWithTimes { + // Check if the file is within 'retain' hours from 'now' + age := now.Sub(f.filetime).Hours() + if age < float64(retainHours) { + log.Debugf("file %s is %f hours old", f.filename, age) + log.Debugf("keeping file %s", f.filename) + continue + } + log.Debugf("Adding candidate file: %s", f.filename) + candidates = append(candidates, f.filename) + } + case retainCount > 0: + // if we had retainCount, we sort all of the files by timestamp, and add to the list all except the retainCount most recent + slices.SortFunc(filesWithTimes, func(i, j fileWithTime) int { + switch { + case i.filetime.Before(j.filetime): + return -1 + case i.filetime.After(j.filetime): + return 1 + } + return 0 + }) + slices.Reverse(filesWithTimes) + if retainCount >= len(filesWithTimes) { + for i := 0 + retainCount; i < len(filesWithTimes); i++ { + log.Debugf("Adding candidate file %s:", filesWithTimes[i].filename) + candidates = append(candidates, filesWithTimes[i].filename) + } + } + default: + return fmt.Errorf("invalid retention string: %s", opts.Retention) + } + + // we have the list, remove them all + for _, filename := range candidates { + if err := target.Remove(filename); err != nil { + return fmt.Errorf("failed to remove file %s: %v", filename, err) + } + pruned++ + } + log.Debugf("pruning %d files from target %s", pruned, target) + } + + return nil +} + +// convertToHours takes a string with format 
"" and converts it to hours. +// The unit can be 'h' (hours), 'd' (days), 'w' (weeks), 'm' (months), 'y' (years). +// Assumes 30 days in a month and 365 days in a year for conversion. +func convertToHours(input string) (int, error) { + re := regexp.MustCompile(`^(\d+)([hdwmy])$`) + matches := re.FindStringSubmatch(input) + + if matches == nil { + return 0, fmt.Errorf("invalid format: %s", input) + } + + value, err := strconv.Atoi(matches[1]) + if err != nil { + return 0, fmt.Errorf("invalid number: %s", matches[1]) + } + + unit := matches[2] + switch unit { + case "h": + return value, nil + case "d": + return value * 24, nil + case "w": + return value * 24 * 7, nil + case "m": + return value * 24 * 30, nil // Approximation + case "y": + return value * 24 * 365, nil // Approximation + default: + return 0, errors.New("invalid unit") + } +} + +// convertToCount takes a string with format "" and converts it to count. +// The unit can be 'c' (count) +func convertToCount(input string) (int, error) { + re := regexp.MustCompile(`^(\d+)([c])$`) + matches := re.FindStringSubmatch(input) + + if matches == nil { + return 0, fmt.Errorf("invalid format: %s", input) + } + + value, err := strconv.Atoi(matches[1]) + if err != nil { + return 0, fmt.Errorf("invalid number: %s", matches[1]) + } + + unit := matches[2] + switch unit { + case "c": + return value, nil + default: + return 0, errors.New("invalid unit") + } +} + +type fileWithTime struct { + filename string + filetime time.Time +} diff --git a/pkg/core/prune_test.go b/pkg/core/prune_test.go new file mode 100644 index 00000000..9197c821 --- /dev/null +++ b/pkg/core/prune_test.go @@ -0,0 +1,132 @@ +package core + +import ( + "fmt" + "os" + "slices" + "testing" + "time" + + "github.com/databacker/mysql-backup/pkg/storage" + "github.com/databacker/mysql-backup/pkg/storage/credentials" + "github.com/stretchr/testify/assert" +) + +func TestConvertToHours(t *testing.T) { + tests := []struct { + input string + output int + err error 
+ }{ + {"2h", 2, nil}, + {"3w", 3 * 7 * 24, nil}, + {"5d", 5 * 24, nil}, + {"1m", 30 * 24, nil}, + {"1y", 365 * 24, nil}, + {"100x", 0, fmt.Errorf("invalid format: 100x")}, + } + for _, tt := range tests { + hours, err := convertToHours(tt.input) + switch { + case (err == nil && tt.err != nil) || (err != nil && tt.err == nil): + t.Errorf("expected error %v, got %v", tt.err, err) + case err != nil && tt.err != nil && err.Error() != tt.err.Error(): + t.Errorf("expected error %v, got %v", tt.err, err) + case hours != tt.output: + t.Errorf("input %s expected %d, got %d", tt.input, tt.output, hours) + } + } +} + +func TestPrune(t *testing.T) { + // we use a fixed list of file before, and a subset of them for after + // db_backup_YYYY-MM-DDTHH:mm:ssZ. + // our list of timestamps should give us these files, of the following time ago: + // 0.25h, 1h, 2h, 3h, 24h (1d), 36h (1.5d), 48h (2d), 60h (2.5d) 72h(3d), + // 167h (1w-1h), 168h (1w), 240h (1.5w) 336h (2w), 576h (2.5w), 504h (3w) + // 744h (3.5w), 720h (1m), 1000h (1.5m), 1440h (2m), 1800h (2.5m), 2160h (3m), + // 8760h (1y), 12000h (1.5y), 17520h (2y) + // we use a fixed starting time to make it consistent. + now := time.Date(2021, 1, 1, 0, 30, 0, 0, time.UTC) + hoursAgo := []float32{0.25, 1, 2, 3, 24, 36, 48, 60, 72, 167, 168, 240, 336, 504, 576, 744, 720, 1000, 1440, 1800, 2160, 8760, 12000, 17520} + // convert to filenames + var filenames []string + for _, h := range hoursAgo { + // convert the time diff into a duration, do not forget the negative + duration, err := time.ParseDuration(fmt.Sprintf("-%fh", h)) + if err != nil { + t.Fatalf("failed to parse duration: %v", err) + } + // convert it into a time.Time + // and add 30 mins to our "now" time. 
+ relativeTime := now.Add(duration).Add(-30 * time.Minute) + // convert that into the filename + filename := fmt.Sprintf("db_backup_%sZ.gz", relativeTime.Format("2006-01-02T15:04:05")) + filenames = append(filenames, filename) + } + tests := []struct { + name string + opts PruneOptions + beforeFiles []string + afterFiles []string + err error + }{ + {"invalid format", PruneOptions{Retention: "100x", Now: now}, nil, nil, fmt.Errorf("invalid retention string: 100x")}, + {"no targets", PruneOptions{Retention: "1h", Now: now}, nil, nil, fmt.Errorf("no targets")}, + // 1 hour - file[1] is 1h+30m = 1.5h, so it should be pruned + {"1 hour", PruneOptions{Retention: "1h", Now: now}, filenames, filenames[0:1], nil}, + // 2 hours - file[2] is 2h+30m = 2.5h, so it should be pruned + {"2 hours", PruneOptions{Retention: "2h", Now: now}, filenames, filenames[0:2], nil}, + // 2 days - file[6] is 48h+30m = 48.5h, so it should be pruned + {"2 days", PruneOptions{Retention: "2d", Now: now}, filenames, filenames[0:6], nil}, + // 3 weeks - file[13] is 504h+30m = 504.5h, so it should be pruned + {"3 weeks", PruneOptions{Retention: "3w", Now: now}, filenames, filenames[0:13], nil}, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + // create a temporary directory + workDir := t.TempDir() + // create beforeFiles in the directory and create a target, but only if there are beforeFiles + // this lets us also test no targets, which should generate an error + if len(tt.beforeFiles) > 0 { + for _, filename := range tt.beforeFiles { + if err := os.WriteFile(fmt.Sprintf("%s/%s", workDir, filename), nil, 0644); err != nil { + t.Errorf("failed to create file %s: %v", filename, err) + return + } + } + + // add our tempdir as the target + store, err := storage.ParseURL(fmt.Sprintf("file://%s", workDir), credentials.Creds{}) + if err != nil { + t.Errorf("failed to parse url: %v", err) + return + } + + tt.opts.Targets = append(tt.opts.Targets, store) + } + + // run Prune + err := 
Prune(tt.opts) + switch { + case (err == nil && tt.err != nil) || (err != nil && tt.err == nil): + t.Errorf("expected error %v, got %v", tt.err, err) + case err != nil && tt.err != nil && err.Error() != tt.err.Error(): + t.Errorf("expected error %v, got %v", tt.err, err) + } + // check files match + files, err := os.ReadDir(workDir) + if err != nil { + t.Errorf("failed to read directory: %v", err) + return + } + var afterFiles []string + for _, file := range files { + afterFiles = append(afterFiles, file.Name()) + } + slices.Sort(afterFiles) + slices.Sort(tt.afterFiles) + assert.ElementsMatch(t, tt.afterFiles, afterFiles) + }) + } +} diff --git a/pkg/core/pruneoptions.go b/pkg/core/pruneoptions.go new file mode 100644 index 00000000..07efd365 --- /dev/null +++ b/pkg/core/pruneoptions.go @@ -0,0 +1,13 @@ +package core + +import ( + "time" + + "github.com/databacker/mysql-backup/pkg/storage" +) + +type PruneOptions struct { + Targets []storage.Storage + Retention string + Now time.Time +} diff --git a/pkg/core/timer.go b/pkg/core/timer.go index 3f455bb3..08508213 100644 --- a/pkg/core/timer.go +++ b/pkg/core/timer.go @@ -2,11 +2,13 @@ package core import ( "fmt" + "os" "regexp" "strconv" "time" "github.com/robfig/cron/v3" + log "github.com/sirupsen/logrus" ) type TimerOptions struct { @@ -150,3 +152,22 @@ func waitForCron(cronExpr string, from time.Time) (time.Duration, error) { next := sched.Next(from.Add(-1 * time.Nanosecond)) return next.Sub(from), nil } + +// TimerCommand runs a command on a timer +func TimerCommand(timerOpts TimerOptions, cmd func() error) error { + c, err := Timer(timerOpts) + if err != nil { + log.Errorf("error creating timer: %v", err) + os.Exit(1) + } + // block and wait for it + for update := range c { + if err := cmd(); err != nil { + return fmt.Errorf("error running command: %w", err) + } + if update.Last { + break + } + } + return nil +} diff --git a/pkg/storage/file/file.go b/pkg/storage/file/file.go index 15359420..cc5653c8 100644 --- 
a/pkg/storage/file/file.go +++ b/pkg/storage/file/file.go @@ -2,6 +2,7 @@ package file import ( "io" + "io/fs" "net/url" "os" "path" @@ -33,6 +34,27 @@ func (f *File) URL() string { return f.url.String() } +func (f *File) ReadDir(dirname string) ([]fs.FileInfo, error) { + + entries, err := os.ReadDir(filepath.Join(f.path, dirname)) + if err != nil { + return nil, err + } + var files []fs.FileInfo + for _, entry := range entries { + info, err := entry.Info() + if err != nil { + return nil, err + } + files = append(files, info) + } + return files, nil +} + +func (f *File) Remove(target string) error { + return os.Remove(filepath.Join(f.path, target)) +} + // copyFile copy a file from to as efficiently as possible func copyFile(from, to string) (int64, error) { src, err := os.Open(from) diff --git a/pkg/storage/s3/s3.go b/pkg/storage/s3/s3.go index de405453..2482dfdc 100644 --- a/pkg/storage/s3/s3.go +++ b/pkg/storage/s3/s3.go @@ -3,9 +3,11 @@ package s3 import ( "context" "fmt" + "io/fs" "net/url" "os" "path" + "time" "github.com/aws/aws-sdk-go-v2/aws" "github.com/aws/aws-sdk-go-v2/config" @@ -139,6 +141,57 @@ func (s *S3) URL() string { return s.url.String() } +func (s *S3) ReadDir(dirname string) ([]fs.FileInfo, error) { + // Get the AWS config + cfg, err := getConfig(s.endpoint) + if err != nil { + return nil, fmt.Errorf("failed to load AWS config: %v", err) + } + + // Create a new S3 service client + svc := s3.NewFromConfig(cfg) + + // Call ListObjectsV2 with your bucket and prefix + result, err := svc.ListObjectsV2(context.TODO(), &s3.ListObjectsV2Input{Bucket: aws.String(s.url.Hostname()), Prefix: aws.String(dirname)}) + if err != nil { + return nil, fmt.Errorf("failed to list objects, %v", err) + } + + // Convert s3.Object to fs.FileInfo + var files []fs.FileInfo + for _, item := range result.Contents { + files = append(files, &s3FileInfo{ + name: *item.Key, + lastModified: *item.LastModified, + size: item.Size, + }) + } + + return files, nil +} + +func (s 
*S3) Remove(target string) error { + // Get the AWS config + cfg, err := getConfig(s.endpoint) + if err != nil { + return fmt.Errorf("failed to load AWS config: %v", err) + } + + // Create a new S3 service client + svc := s3.NewFromConfig(cfg) + + // Call DeleteObject with your bucket and the key of the object you want to delete + _, err = svc.DeleteObject(context.TODO(), &s3.DeleteObjectInput{ + Bucket: aws.String(s.url.Hostname()), + Key: aws.String(target), + }) + if err != nil { + return fmt.Errorf("failed to delete object, %v", err) + } + + return nil +} + func getEndpoint(endpoint string) string { // for some reason, the lookup gets flaky when the endpoint is 127.0.0.1 // so you have to set it to localhost explicitly. @@ -174,3 +227,16 @@ func getConfig(endpoint string) (aws.Config, error) { ) } + +type s3FileInfo struct { + name string + lastModified time.Time + size int64 +} + +func (s s3FileInfo) Name() string { return s.name } +func (s s3FileInfo) Size() int64 { return s.size } +func (s s3FileInfo) Mode() os.FileMode { return 0 } // Not applicable in S3 +func (s s3FileInfo) ModTime() time.Time { return s.lastModified } +func (s s3FileInfo) IsDir() bool { return false } // Not applicable in S3 +func (s s3FileInfo) Sys() interface{} { return nil } // Not applicable in S3 diff --git a/pkg/storage/smb/smb.go b/pkg/storage/smb/smb.go index f2dde505..eab2ca65 100644 --- a/pkg/storage/smb/smb.go +++ b/pkg/storage/smb/smb.go @@ -50,11 +50,50 @@ func New(u url.URL, opts ...Option) *SMB { } func (s *SMB) Pull(source, target string) (int64, error) { - return s.command(false, s.url, source, target) + var ( + copied int64 + err error + ) + err = s.exec(s.url, func(fs *smb2.Share, sharepath string) error { + smbFilename := fmt.Sprintf("%s%c%s", sharepath, smb2.PathSeparator, filepath.Base(strings.ReplaceAll(target, ":", "-"))) + + to, err := os.Create(target) + if err != nil { + return err + } + defer to.Close() + from, err := fs.Open(smbFilename) + if err != nil { + 
return err + } + defer from.Close() + copied, err = io.Copy(to, from) + return err + }) + return copied, err } func (s *SMB) Push(target, source string) (int64, error) { - return s.command(true, s.url, target, source) + var ( + copied int64 + err error + ) + err = s.exec(s.url, func(fs *smb2.Share, sharepath string) error { + smbFilename := fmt.Sprintf("%s%c%s", sharepath, smb2.PathSeparator, filepath.Base(strings.ReplaceAll(target, ":", "-"))) + from, err := os.Open(source) + if err != nil { + return err + } + defer from.Close() + to, err := fs.Create(smbFilename) + if err != nil { + return err + } + defer to.Close() + copied, err = io.Copy(to, from) + return err + }) + return copied, err } func (s *SMB) Protocol() string { @@ -65,7 +104,26 @@ func (s *SMB) URL() string { return s.url.String() } -func (s *SMB) command(push bool, u url.URL, remoteFilename, filename string) (int64, error) { +func (s *SMB) ReadDir(dirname string) ([]os.FileInfo, error) { + var ( + err error + infos []os.FileInfo + ) + err = s.exec(s.url, func(fs *smb2.Share, sharepath string) error { + infos, err = fs.ReadDir(sharepath) + return err + }) + return infos, err +} + +func (s *SMB) Remove(target string) error { + return s.exec(s.url, func(fs *smb2.Share, sharepath string) error { + smbFilename := fmt.Sprintf("%s%c%s", sharepath, smb2.PathSeparator, filepath.Base(strings.ReplaceAll(target, ":", "-"))) + return fs.Remove(smbFilename) + }) +} + +func (s *SMB) exec(u url.URL, command func(fs *smb2.Share, sharepath string) error) error { var ( username, password, domain string ) @@ -86,7 +144,7 @@ func (s *SMB) command(push bool, u url.URL, remoteFilename, filename string) (in conn, err := net.Dial("tcp", host) if err != nil { - return 0, err + return err } defer conn.Close() @@ -100,7 +158,7 @@ func (s *SMB) command(push bool, u url.URL, remoteFilename, filename string) (in smbConn, err := d.Dial(conn) if err != nil { - return 0, err + return err } defer func() { _ = smbConn.Logoff() @@ 
-108,42 +166,12 @@ func (s *SMB) command(push bool, u url.URL, remoteFilename, filename string) (in fs, err := smbConn.Mount(share) if err != nil { - return 0, err + return err } defer func() { _ = fs.Umount() }() - - smbFilename := fmt.Sprintf("%s%c%s", sharepath, smb2.PathSeparator, filepath.Base(strings.ReplaceAll(remoteFilename, ":", "-"))) - - var ( - from io.ReadCloser - to io.WriteCloser - ) - if push { - from, err = os.Open(filename) - if err != nil { - return 0, err - } - defer from.Close() - to, err = fs.Create(smbFilename) - if err != nil { - return 0, err - } - defer to.Close() - } else { - to, err = os.Create(filename) - if err != nil { - return 0, err - } - defer to.Close() - from, err = fs.Open(smbFilename) - if err != nil { - return 0, err - } - defer from.Close() - } - return io.Copy(to, from) + return command(fs, sharepath) } // parseSMBDomain parse a username to get an SMB domain diff --git a/pkg/storage/storage.go b/pkg/storage/storage.go index 2d7cecf3..15b244bf 100644 --- a/pkg/storage/storage.go +++ b/pkg/storage/storage.go @@ -1,8 +1,13 @@ package storage +import "io/fs" + type Storage interface { Push(target, source string) (int64, error) Pull(source, target string) (int64, error) Protocol() string URL() string + ReadDir(dirname string) ([]fs.FileInfo, error) + // Remove remove a particular file + Remove(string) error } diff --git a/test/backup_test.go b/test/backup_test.go index 0ab2e71a..f0de662c 100644 --- a/test/backup_test.go +++ b/test/backup_test.go @@ -442,7 +442,9 @@ func runDumpTest(dc *dockerContext, compact bool, base string, targets []backupT timerOpts := core.TimerOptions{ Once: true, } - return core.TimerDump(dumpOpts, timerOpts) + return core.TimerCommand(timerOpts, func() error { + return core.Dump(dumpOpts) + }) } func setup(dc *dockerContext, base, backupFile, compactBackupFile string) (mysql, smb containerPort, s3url string, s3backend gofakes3.Backend, err error) { From 8c6fa4e295f261e38359fea048d13e3e180dd126 Mon Sep 17 
00:00:00 2001 From: Avi Deitcher Date: Sun, 17 Mar 2024 17:43:23 +0200 Subject: [PATCH 7/8] update sample configs Signed-off-by: Avi Deitcher --- sample-configs/{config.yaml => local.yaml} | 6 ++++-- sample-configs/{remote-config.yaml => remote.yaml} | 6 +++++- sample-configs/telemetry-only.yaml | 5 +++-- 3 files changed, 12 insertions(+), 5 deletions(-) rename sample-configs/{config.yaml => local.yaml} (92%) rename sample-configs/{remote-config.yaml => remote.yaml} (84%) diff --git a/sample-configs/config.yaml b/sample-configs/local.yaml similarity index 92% rename from sample-configs/config.yaml rename to sample-configs/local.yaml index a205965a..3193e72f 100644 --- a/sample-configs/config.yaml +++ b/sample-configs/local.yaml @@ -1,5 +1,7 @@ -# sample configuration file for entire local config, not using remote config service -# will be overridden by command-line arguments +# sample configuration file for config entirely local, not using remote config service +# can be overridden by command-line arguments + +# standard reference of type and version type: config.databack.io version: 1 diff --git a/sample-configs/remote-config.yaml b/sample-configs/remote.yaml similarity index 84% rename from sample-configs/remote-config.yaml rename to sample-configs/remote.yaml index eed1fda1..abf3d739 100644 --- a/sample-configs/remote-config.yaml +++ b/sample-configs/remote.yaml @@ -1,4 +1,8 @@ -# sample configuration file for all config provided by remote service +# sample configuration file for config entirely from remote service. When retrieving from remote, +# will also be told how to handle telemetry. 
+# can be overridden by command-line arguments + +# standard reference of type and version type: config.databack.io version: 1 diff --git a/sample-configs/telemetry-only.yaml b/sample-configs/telemetry-only.yaml index 1d587fe0..1de7a6b6 100644 --- a/sample-configs/telemetry-only.yaml +++ b/sample-configs/telemetry-only.yaml @@ -1,10 +1,11 @@ # sample configuration file for telemetry service only; everything else is local -# will be overridden by command-line arguments -# or remote, if configured +# can be overridden by command-line arguments or remote, if configured # only needed if registered to send logs and results to a telemetry service # and not defined in the config service. Normally, you can just use the config # to get the telemetry info + +# standard reference of type and version type: config.databack.io version: 1 From 7bb42b40dd12264d6785a191c7f4ca934b11fdf4 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Wed, 20 Mar 2024 17:25:36 +0000 Subject: [PATCH 8/8] Bump github.com/docker/docker Bumps [github.com/docker/docker](https://github.com/docker/docker) from 24.0.7+incompatible to 24.0.9+incompatible. - [Release notes](https://github.com/docker/docker/releases) - [Commits](https://github.com/docker/docker/compare/v24.0.7...v24.0.9) --- updated-dependencies: - dependency-name: github.com/docker/docker dependency-type: direct:production ... 
Signed-off-by: dependabot[bot] --- go.mod | 2 +- go.sum | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/go.mod b/go.mod index 8bc97177..39c14229 100644 --- a/go.mod +++ b/go.mod @@ -7,7 +7,7 @@ require ( github.com/aws/aws-sdk-go-v2/config v1.18.30 github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.11.74 github.com/aws/aws-sdk-go-v2/service/s3 v1.37.1 - github.com/docker/docker v24.0.7+incompatible + github.com/docker/docker v24.0.9+incompatible github.com/docker/go-connections v0.4.0 github.com/go-sql-driver/mysql v1.7.1 github.com/johannesboyne/gofakes3 v0.0.0-20230506070712-04da935ef877 diff --git a/go.sum b/go.sum index ebdae3c4..af0b9d8a 100644 --- a/go.sum +++ b/go.sum @@ -74,8 +74,8 @@ github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZm github.com/dgryski/go-sip13 v0.0.0-20181026042036-e10d5fee7954/go.mod h1:vAd38F8PWV+bWy6jNmig1y/TA+kYO4g3RSRF0IAv0no= github.com/docker/distribution v2.8.2+incompatible h1:T3de5rq0dB1j30rp0sA2rER+m322EBzniBPB6ZIzuh8= github.com/docker/distribution v2.8.2+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w= -github.com/docker/docker v24.0.7+incompatible h1:Wo6l37AuwP3JaMnZa226lzVXGA3F9Ig1seQen0cKYlM= -github.com/docker/docker v24.0.7+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= +github.com/docker/docker v24.0.9+incompatible h1:HPGzNmwfLZWdxHqK9/II92pyi1EpYKsAqcl4G0Of9v0= +github.com/docker/docker v24.0.9+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= github.com/docker/go-connections v0.4.0 h1:El9xVISelRB7BuFusrZozjnkIM5YnzCViNKohAFqRJQ= github.com/docker/go-connections v0.4.0/go.mod h1:Gbd7IOopHjR8Iph03tsViu4nIes5XhDvyHbTtUxmeec= github.com/docker/go-units v0.5.0 h1:69rxXcBk27SvSaaxTtLh/8llcHD8vYHT7WSdRZ/jvr4=