diff --git a/cmd/common_test.go b/cmd/common_test.go
index adaeedfe..05790e95 100644
--- a/cmd/common_test.go
+++ b/cmd/common_test.go
@@ -19,9 +19,9 @@ func newMockExecs() *mockExecs {
 	return m
 }
 
-func (m *mockExecs) Dump(opts core.DumpOptions) error {
+func (m *mockExecs) Dump(opts core.DumpOptions) (core.DumpResults, error) {
 	args := m.Called(opts)
-	return args.Error(0)
+	return core.DumpResults{}, args.Error(0)
 }
 
 func (m *mockExecs) Restore(opts core.RestoreOptions) error {
diff --git a/cmd/dump.go b/cmd/dump.go
index cabc4445..cc34fe1a 100644
--- a/cmd/dump.go
+++ b/cmd/dump.go
@@ -199,7 +199,7 @@ func dumpCmd(passedExecs execs, cmdConfig *cmdConfiguration) (*cobra.Command, er
 				Run:             uid,
 				FilenamePattern: filenamePattern,
 			}
-			err := executor.Dump(dumpOpts)
+			_, err := executor.Dump(dumpOpts)
 			if err != nil {
 				return fmt.Errorf("error running dump: %w", err)
 			}
diff --git a/cmd/root.go b/cmd/root.go
index bae60697..b0a72b7c 100644
--- a/cmd/root.go
+++ b/cmd/root.go
@@ -19,7 +19,7 @@ import (
 type execs interface {
 	SetLogger(logger *log.Logger)
 	GetLogger() *log.Logger
-	Dump(opts core.DumpOptions) error
+	Dump(opts core.DumpOptions) (core.DumpResults, error)
 	Restore(opts core.RestoreOptions) error
 	Prune(opts core.PruneOptions) error
 	Timer(timerOpts core.TimerOptions, cmd func() error) error
diff --git a/pkg/core/dump.go b/pkg/core/dump.go
index aac00980..5850b5d1 100644
--- a/pkg/core/dump.go
+++ b/pkg/core/dump.go
@@ -2,11 +2,11 @@ package core
 
 import (
 	"fmt"
-	"html/template"
 	"os"
 	"path"
 	"path/filepath"
 	"strings"
+	"text/template"
 	"time"
 
 	log "github.com/sirupsen/logrus"
@@ -16,7 +16,10 @@ import (
 )
 
 // Dump run a single dump, based on the provided opts
-func (e *Executor) Dump(opts DumpOptions) error {
+func (e *Executor) Dump(opts DumpOptions) (DumpResults, error) {
+	results := DumpResults{Start: time.Now()}
+	defer func() { results.End = time.Now() }()
+
 	targets := opts.Targets
 	safechars := opts.Safechars
 	dbnames := opts.DBNames
@@ -29,35 +32,38 @@ func (e *Executor) Dump(opts DumpOptions) error {
 	logger := e.Logger.WithField("run", opts.Run.String())
 
 	now := time.Now()
+	results.Time = now
+
 	timepart := now.Format(time.RFC3339)
 	logger.Infof("beginning dump %s", timepart)
 	if safechars {
 		timepart = strings.ReplaceAll(timepart, ":", "-")
 	}
+	results.Timestamp = timepart
 
 	// sourceFilename: file that the uploader looks for when performing the upload
 	// targetFilename: the remote file that is actually uploaded
 	sourceFilename := fmt.Sprintf("db_backup_%s.%s", timepart, compressor.Extension())
-	targetFilename, err := processFilenamePattern(filenamePattern, now, timepart, compressor.Extension())
+	targetFilename, err := ProcessFilenamePattern(filenamePattern, now, timepart, compressor.Extension())
 	if err != nil {
-		return fmt.Errorf("failed to process filename pattern: %v", err)
+		return results, fmt.Errorf("failed to process filename pattern: %v", err)
 	}
 
 	// create a temporary working directory
 	tmpdir, err := os.MkdirTemp("", "databacker_backup")
 	if err != nil {
-		return fmt.Errorf("failed to make temporary working directory: %v", err)
+		return results, fmt.Errorf("failed to make temporary working directory: %v", err)
 	}
 	defer os.RemoveAll(tmpdir)
 
 	// execute pre-backup scripts if any
 	if err := preBackup(timepart, path.Join(tmpdir, sourceFilename), tmpdir, opts.PreBackupScripts, logger.Level == log.DebugLevel); err != nil {
-		return fmt.Errorf("error running pre-restore: %v", err)
+		return results, fmt.Errorf("error running pre-restore: %v", err)
 	}
 
 	// do the dump(s)
 	workdir, err := os.MkdirTemp("", "databacker_cache")
 	if err != nil {
-		return fmt.Errorf("failed to make temporary cache directory: %v", err)
+		return results, fmt.Errorf("failed to make temporary cache directory: %v", err)
 	}
 	defer os.RemoveAll(workdir)
@@ -66,62 +72,69 @@ func (e *Executor) Dump(opts DumpOptions) error {
 	// do we split the output by schema, or one big dump file?
 	if len(dbnames) == 0 {
 		if dbnames, err = database.GetSchemas(dbconn); err != nil {
-			return fmt.Errorf("failed to list database schemas: %v", err)
+			return results, fmt.Errorf("failed to list database schemas: %v", err)
 		}
 	}
 	for _, s := range dbnames {
 		outFile := path.Join(workdir, fmt.Sprintf("%s_%s.sql", s, timepart))
 		f, err := os.Create(outFile)
 		if err != nil {
-			return fmt.Errorf("failed to create dump file '%s': %v", outFile, err)
+			return results, fmt.Errorf("failed to create dump file '%s': %v", outFile, err)
 		}
 		dw = append(dw, database.DumpWriter{
 			Schemas: []string{s},
 			Writer:  f,
 		})
 	}
+	results.DumpStart = time.Now()
 	if err := database.Dump(dbconn, database.DumpOpts{
 		Compact:             compact,
 		SuppressUseDatabase: suppressUseDatabase,
 		MaxAllowedPacket:    maxAllowedPacket,
 	}, dw); err != nil {
-		return fmt.Errorf("failed to dump database: %v", err)
+		return results, fmt.Errorf("failed to dump database: %v", err)
 	}
+	results.DumpEnd = time.Now()
 
 	// create my tar writer to archive it all together
 	// WRONG: THIS WILL CAUSE IT TO TRY TO LOOP BACK ON ITSELF
 	outFile := path.Join(tmpdir, sourceFilename)
 	f, err := os.OpenFile(outFile, os.O_CREATE|os.O_WRONLY, 0o644)
 	if err != nil {
-		return fmt.Errorf("failed to open output file '%s': %v", outFile, err)
+		return results, fmt.Errorf("failed to open output file '%s': %v", outFile, err)
 	}
 	defer f.Close()
 	cw, err := compressor.Compress(f)
 	if err != nil {
-		return fmt.Errorf("failed to create compressor: %v", err)
+		return results, fmt.Errorf("failed to create compressor: %v", err)
 	}
 	if err := archive.Tar(workdir, cw); err != nil {
-		return fmt.Errorf("error creating the compressed archive: %v", err)
+		return results, fmt.Errorf("error creating the compressed archive: %v", err)
 	}
 	// we need to close it explicitly before moving ahead
 	f.Close()
 
 	// execute post-backup scripts if any
 	if err := postBackup(timepart, path.Join(tmpdir, sourceFilename), tmpdir, opts.PostBackupScripts, logger.Level == log.DebugLevel); err != nil {
-		return fmt.Errorf("error running pre-restore: %v", err)
+		return results, fmt.Errorf("error running pre-restore: %v", err)
 	}
 
 	// upload to each destination
 	for _, t := range targets {
-		logger.Debugf("uploading via protocol %s from %s to %s", t.Protocol(), sourceFilename, targetFilename)
-		copied, err := t.Push(targetFilename, filepath.Join(tmpdir, sourceFilename), logger)
+		uploadResult := UploadResult{Target: t.URL(), Start: time.Now()}
+		targetCleanFilename := t.Clean(targetFilename)
+		logger.Debugf("uploading via protocol %s from %s to %s", t.Protocol(), sourceFilename, targetCleanFilename)
+		copied, err := t.Push(targetCleanFilename, filepath.Join(tmpdir, sourceFilename), logger)
 		if err != nil {
-			return fmt.Errorf("failed to push file: %v", err)
+			return results, fmt.Errorf("failed to push file: %v", err)
 		}
 		logger.Debugf("completed copying %d bytes", copied)
+		uploadResult.Filename = targetCleanFilename
+		uploadResult.End = time.Now()
+		results.Uploads = append(results.Uploads, uploadResult)
 	}
 
-	return nil
+	return results, nil
 }
 
 // run pre-backup scripts, if they exist
@@ -147,9 +160,9 @@ func postBackup(timestamp, dumpfile, dumpdir, postBackupDir string, debug bool)
 	return runScripts(postBackupDir, env)
 }
 
-// processFilenamePattern takes a template pattern and processes it with the current time.
+// ProcessFilenamePattern takes a template pattern and processes it with the current time.
 // Passes the timestamp as a string, because it sometimes gets changed for safechars.
-func processFilenamePattern(pattern string, now time.Time, timestamp, ext string) (string, error) {
+func ProcessFilenamePattern(pattern string, now time.Time, timestamp, ext string) (string, error) {
 	if pattern == "" {
 		pattern = DefaultFilenamePattern
 	}
diff --git a/pkg/core/dumpresults.go b/pkg/core/dumpresults.go
new file mode 100644
index 00000000..b785663f
--- /dev/null
+++ b/pkg/core/dumpresults.go
@@ -0,0 +1,22 @@
+package core
+
+import "time"
+
+// DumpResults lists results of the dump.
+type DumpResults struct {
+	Start     time.Time
+	End       time.Time
+	Time      time.Time
+	Timestamp string
+	DumpStart time.Time
+	DumpEnd   time.Time
+	Uploads   []UploadResult
+}
+
+// UploadResult lists results of an individual upload
+type UploadResult struct {
+	Target   string
+	Filename string
+	Start    time.Time
+	End      time.Time
+}
diff --git a/pkg/storage/file/file.go b/pkg/storage/file/file.go
index 195495b2..dda95ea6 100644
--- a/pkg/storage/file/file.go
+++ b/pkg/storage/file/file.go
@@ -28,6 +28,10 @@ func (f *File) Push(target, source string, logger *log.Entry) (int64, error) {
 	return copyFile(source, filepath.Join(f.path, target))
 }
 
+func (f *File) Clean(filename string) string {
+	return filename
+}
+
 func (f *File) Protocol() string {
 	return "file"
 }
diff --git a/pkg/storage/s3/s3.go b/pkg/storage/s3/s3.go
index 4394d5c0..3bdb1745 100644
--- a/pkg/storage/s3/s3.go
+++ b/pkg/storage/s3/s3.go
@@ -125,6 +125,10 @@ func (s *S3) Push(target, source string, logger *log.Entry) (int64, error) {
 	return 0, nil
 }
 
+func (s *S3) Clean(filename string) string {
+	return filename
+}
+
 func (s *S3) Protocol() string {
 	return "s3"
 }
diff --git a/pkg/storage/smb/smb.go b/pkg/storage/smb/smb.go
index 38a043a9..8f70d870 100644
--- a/pkg/storage/smb/smb.go
+++ b/pkg/storage/smb/smb.go
@@ -80,7 +80,7 @@ func (s *SMB) Push(target, source string, logger *log.Entry) (int64, error) {
 		err    error
 	)
 	err = s.exec(s.url, func(fs *smb2.Share, sharepath string) error {
-		smbFilename := fmt.Sprintf("%s%c%s", sharepath, smb2.PathSeparator, filepath.Base(strings.ReplaceAll(target, ":", "-")))
+		smbFilename := fmt.Sprintf("%s%c%s", sharepath, smb2.PathSeparator, target)
 		from, err := os.Open(source)
 		if err != nil {
 			return err
@@ -97,6 +97,10 @@ func (s *SMB) Push(target, source string, logger *log.Entry) (int64, error) {
 	return copied, err
 }
 
+func (s *SMB) Clean(filename string) string {
+	return strings.ReplaceAll(filename, ":", "-")
+}
+
 func (s *SMB) Protocol() string {
 	return "smb"
 }
diff --git a/pkg/storage/storage.go b/pkg/storage/storage.go
index 9c31ab44..48752668 100644
--- a/pkg/storage/storage.go
+++ b/pkg/storage/storage.go
@@ -9,6 +9,7 @@ import (
 type Storage interface {
 	Protocol() string
 	URL() string
+	Clean(filename string) string
 	Push(target, source string, logger *log.Entry) (int64, error)
 	Pull(source, target string, logger *log.Entry) (int64, error)
 	ReadDir(dirname string, logger *log.Entry) ([]fs.FileInfo, error)
diff --git a/test/backup_test.go b/test/backup_test.go
index 363daf76..c1e5ff9e 100644
--- a/test/backup_test.go
+++ b/test/backup_test.go
@@ -77,6 +77,21 @@ type backupTarget struct {
 	localPath string
 }
 
+type testOptions struct {
+	compact      bool
+	targets      []string
+	dc           *dockerContext
+	base         string
+	prePost      bool
+	backupData   []byte
+	mysql        containerPort
+	smb          containerPort
+	s3           string
+	s3backend    gofakes3.Backend
+	checkCommand checkCommand
+	dumpOptions  core.DumpOptions
+}
+
 func (t backupTarget) String() string {
 	return t.s
 }
@@ -246,7 +261,7 @@ func (d *dockerContext) startContainer(image, name, portMap string, binds []stri
 			containerPort: struct{}{},
 		}
 		hostConfig.PortBindings = nat.PortMap{
-			containerPort: []nat.PortBinding{{HostIP: "0.0.0.0"}},
+			containerPort: []nat.PortBinding{{HostIP: "0.0.0.0", HostPort: ""}},
 		}
 	}
 	resp, err := d.cli.ContainerCreate(ctx, containerConfig, hostConfig, nil, nil, name)
@@ -263,8 +278,29 @@ func (d *dockerContext) startContainer(image, name, portMap string, binds []stri
 	if portMap == "" {
 		return
 	}
-	inspect, err := d.cli.ContainerInspect(ctx, cid)
-	if err != nil {
+	var (
+		maxRetries = 3
+		delay      = 500 * time.Millisecond
+		inspect    types.ContainerJSON
+	)
+	for i := 0; i < maxRetries; i++ {
+		// Inspect the container
+		inspect, err = d.cli.ContainerInspect(ctx, cid)
+		if err != nil {
+			return
+		}
+
+		// Check the desired status
+		if inspect.State.Running && len(inspect.NetworkSettings.Ports[containerPort]) > 0 {
+			break
+		}
+
+		// Wait for delay ms before trying again
+		time.Sleep(delay)
+	}
+
+	if len(inspect.NetworkSettings.Ports[containerPort]) == 0 {
+		err = fmt.Errorf("no port mapping found for container %s %s port %s", cid, name, containerPort)
 		return
 	}
 	portStr := inspect.NetworkSettings.Ports[containerPort][0].HostPort
@@ -417,50 +453,6 @@ func (d *dockerContext) rmContainers(cids ...string) error {
 // - check that the backup now is there in the right format
 // - clear the target
 
-func runDumpTest(dc *dockerContext, compact bool, base string, targets []backupTarget, sequence int, smb, mysql containerPort, s3 string) error {
-	dbconn := database.Connection{
-		User: mysqlUser,
-		Pass: mysqlPass,
-		Host: "localhost",
-		Port: mysql.port,
-	}
-	var targetVals []storage.Storage
-	// all targets should have the same sequence, with varying subsequence, so take any one
-	var id string
-	for _, target := range targets {
-		t := target.String()
-		id = target.ID()
-		t = target.WithPrefix(base)
-		localPath := target.LocalPath()
-		if err := os.MkdirAll(localPath, 0o755); err != nil {
-			return fmt.Errorf("failed to create local path %s: %w", localPath, err)
-		}
-		store, err := storage.ParseURL(t, credentials.Creds{AWS: credentials.AWSCreds{Endpoint: s3}})
-		if err != nil {
-			return fmt.Errorf("invalid target url: %v", err)
-		}
-		targetVals = append(targetVals, store)
-
-	}
-	dumpOpts := core.DumpOptions{
-		Targets:           targetVals,
-		DBConn:            dbconn,
-		Compressor:        &compression.GzipCompressor{},
-		Compact:           compact,
-		PreBackupScripts:  filepath.Join(base, "backups", id, "pre-backup"),
-		PostBackupScripts: filepath.Join(base, "backups", id, "post-backup"),
-	}
-	timerOpts := core.TimerOptions{
-		Once: true,
-	}
-	executor := &core.Executor{}
-	executor.SetLogger(log.New())
-
-	return executor.Timer(timerOpts, func() error {
-		return executor.Dump(dumpOpts)
-	})
-}
-
 func setup(dc *dockerContext, base, backupFile, compactBackupFile string) (mysql, smb containerPort, s3url string, s3backend gofakes3.Backend, err error) {
 	if err := dc.makeSMB(smbImage); err != nil {
 		return mysql, smb, s3url, s3backend, fmt.Errorf("failed to build smb image: %v", err)
@@ -532,6 +524,28 @@ log_queries_not_using_indexes = 1
 	return
 }
 
+// backupTargetsToStorage convert a list of backupTarget to a list of core.Storage
+func backupTargetsToStorage(targets []backupTarget, base, s3 string) ([]storage.Storage, error) {
+	var targetVals []storage.Storage
+	// all targets should have the same sequence, with varying subsequence, so take any one
+	for _, tgt := range targets {
+		tg := tgt.String()
+		tg = tgt.WithPrefix(base)
+		localPath := tgt.LocalPath()
+		if err := os.MkdirAll(localPath, 0o755); err != nil {
+			return nil, fmt.Errorf("failed to create local path %s: %v", localPath, err)
+		}
+		store, err := storage.ParseURL(tg, credentials.Creds{AWS: credentials.AWSCreds{Endpoint: s3}})
+		if err != nil {
+			return nil, fmt.Errorf("invalid target url: %v", err)
+		}
+		targetVals = append(targetVals, store)
+
+	}
+	return targetVals, nil
+}
+
+// targetToTargets take a target string, which can contain multiple target URLs, and convert it to a list of backupTarget
 func targetToTargets(target string, sequence int, smb containerPort, base string) ([]backupTarget, error) {
 	var (
 		targets = strings.Fields(target)
@@ -580,36 +594,77 @@ func targetToTargets(target string, sequence int, smb containerPort, base string
 	return allTargets, nil
 }
 
-type checkCommand func(t *testing.T, base string, validBackup []byte, s3backend gofakes3.Backend, targets []backupTarget)
+type checkCommand func(t *testing.T, base string, validBackup []byte, s3backend gofakes3.Backend, targets []backupTarget, results core.DumpResults)
 
-func runTest(t *testing.T, dc *dockerContext, compact bool, targets []string, base string, prePost bool, backupData []byte, mysql, smb containerPort, s3 string, s3backend gofakes3.Backend, checkCommand checkCommand) {
+func runTest(t *testing.T, opts testOptions) {
 	// run backups for each target
-	for i, target := range targets {
+	for i, target := range opts.targets {
 		t.Run(target, func(t *testing.T) {
 			// should add t.Parallel() here for parallel execution, but later
 			log.Debugf("Running test for target '%s'", target)
-			allTargets, err := targetToTargets(target, i, smb, base)
+			allTargets, err := targetToTargets(target, i, opts.smb, opts.base)
 			if err != nil {
 				t.Fatalf("failed to parse target: %v", err)
 			}
 			log.Debugf("Populating data for target %s", target)
-			if err := populateVol(base, allTargets); err != nil {
+			if err := populateVol(opts.base, allTargets); err != nil {
 				t.Fatalf("failed to populate volume for target %s: %v", target, err)
 			}
-			if err := populatePrePost(base, allTargets); err != nil {
+			if err := populatePrePost(opts.base, allTargets); err != nil {
 				t.Fatalf("failed to populate pre-post for target %s: %v", target, err)
 			}
 			log.Debugf("Running backup for target %s", target)
-			if err := runDumpTest(dc, compact, base, allTargets, i, smb, mysql, s3); err != nil {
+			opts.dumpOptions.DBConn = database.Connection{
+				User: mysqlUser,
+				Pass: mysqlPass,
+				Host: "localhost",
+				Port: opts.mysql.port,
+			}
+			// take []backupTarget and convert to []storage.Storage that can be passed to DumpOptions
+			targetVals, err := backupTargetsToStorage(allTargets, opts.base, opts.s3)
+			if err != nil {
+				t.Fatalf("failed to convert backup targets to storage: %v", err)
+			}
+
+			id := allTargets[0].ID()
+
+			opts.dumpOptions.Targets = targetVals
+			opts.dumpOptions.PreBackupScripts = filepath.Join(opts.base, "backups", id, "pre-backup")
+			opts.dumpOptions.PostBackupScripts = filepath.Join(opts.base, "backups", id, "post-backup")
+
+			timerOpts := core.TimerOptions{
+				Once: true,
+			}
+			executor := &core.Executor{}
+			executor.SetLogger(log.New())
+
+			var results core.DumpResults
+			if err := executor.Timer(timerOpts, func() error {
+				ret, err := executor.Dump(opts.dumpOptions)
+				results = ret
+				return err
+			}); err != nil {
 				t.Fatalf("failed to run dump test: %v", err)
 			}
-			checkCommand(t, base, backupData, s3backend, allTargets)
+			// check that the filename matches the pattern
+			for i, upload := range results.Uploads {
+				expected, err := core.ProcessFilenamePattern(opts.dumpOptions.FilenamePattern, results.Time, results.Timestamp, opts.dumpOptions.Compressor.Extension())
+				if err != nil {
+					t.Fatalf("failed to process filename pattern: %v", err)
+				}
+				clean := opts.dumpOptions.Targets[i].Clean(expected)
+				if upload.Filename != clean {
+					t.Fatalf("filename %s does not match expected %s", upload.Filename, clean)
+				}
+			}
+
+			opts.checkCommand(t, opts.base, opts.backupData, opts.s3backend, allTargets, results)
 		})
 	}
 }
 
-func checkDumpTest(t *testing.T, base string, expected []byte, s3backend gofakes3.Backend, targets []backupTarget) {
+func checkDumpTest(t *testing.T, base string, expected []byte, s3backend gofakes3.Backend, targets []backupTarget, results core.DumpResults) {
 	// all of it is in the volume we created, so check from there
 	var (
 		backupDataReader io.Reader
@@ -646,6 +701,8 @@ func checkDumpTest(t *testing.T, base string, expected []byte, s3backend gofakes
 			return
 		}
 
+		targetFilename := results.Uploads[i].Filename
+
 		switch scheme {
 		case "s3":
 			// because we had to add the bucket at the beginning of the path, because fakes3
@@ -655,46 +712,17 @@ func checkDumpTest(t *testing.T, base string, expected []byte, s3backend gofakes
 			// we still will remove the bucketName
 			p = strings.TrimPrefix(p, "/")
 			p = strings.TrimPrefix(p, bucketName+"/")
-			objList, err := s3backend.ListBucket(
-				bucketName,
-				&gofakes3.Prefix{HasPrefix: true, Prefix: p},
-				gofakes3.ListBucketPage{},
-			)
+			p = filepath.Join(p, targetFilename)
+			obj, err := s3backend.GetObject(bucketName, p, nil)
 			if err != nil {
-				t.Fatalf("failed to get backup objects from s3: %v", err)
+				t.Fatalf("failed to get backup object %s from s3: %v", p, err)
 				return
 			}
-			for _, objInfo := range objList.Contents {
-				if strings.HasSuffix(objInfo.Key, ".tgz") {
-					obj, err := s3backend.GetObject(bucketName, objInfo.Key, nil)
-					if err != nil {
-						t.Fatalf("failed to get backup object %s from s3: %v", objInfo.Key, err)
-						return
-					}
-					backupDataReader = obj.Contents
-					break
-				}
-			}
+			backupDataReader = obj.Contents
 		default:
+			var err error
 			bdir := target.LocalPath()
-
-			var backupFile string
-			entries, err := os.ReadDir(bdir)
-			if err != nil {
-				t.Fatalf("failed to read backup directory %s: %v", bdir, err)
-				return
-			}
-			for _, entry := range entries {
-				if strings.HasSuffix(entry.Name(), ".tgz") {
-					backupFile = entry.Name()
-					break
-				}
-			}
-			if backupFile == "" {
-				assert.NotEmpty(t, backupFile, "missing backup tgz file %s", id)
-				continue
-			}
-			backupFile = filepath.Join(bdir, backupFile)
+			backupFile := filepath.Join(bdir, targetFilename)
 			backupDataReader, err = os.Open(backupFile)
 			if err != nil {
 				t.Fatalf("failed to read backup file %s: %v", backupFile, err)
@@ -840,16 +868,62 @@ func TestIntegration(t *testing.T) {
 
 	// check just the contents of a compact backup
 	t.Run("full", func(t *testing.T) {
-		runTest(t, dc, false, []string{
-			"/full-backups/",
-		}, base, false, backupData, mysql, smb, s3, s3backend, checkDumpTest)
+		dumpOpts := core.DumpOptions{
+			Compressor: &compression.GzipCompressor{},
+			Compact:    false,
+		}
+		runTest(t, testOptions{
+			targets:      []string{"/full-backups/"},
+			dc:           dc,
+			base:         base,
+			backupData:   backupData,
+			mysql:        mysql,
+			smb:          smb,
+			s3:           s3,
+			s3backend:    s3backend,
+			dumpOptions:  dumpOpts,
+			checkCommand: checkDumpTest,
+		})
 	})
 
 	// check just the contents of a backup without minimizing metadata (i.e. non-compact)
 	t.Run("compact", func(t *testing.T) {
-		runTest(t, dc, true, []string{
-			"/compact-backups/",
-		}, base, false, compactBackupData, mysql, smb, s3, s3backend, checkDumpTest)
+		dumpOpts := core.DumpOptions{
+			Compressor: &compression.GzipCompressor{},
+			Compact:    true,
+		}
+		runTest(t, testOptions{
+			targets:      []string{"/compact-backups/"},
+			dc:           dc,
+			base:         base,
+			backupData:   compactBackupData,
+			mysql:        mysql,
+			smb:          smb,
+			s3:           s3,
+			s3backend:    s3backend,
+			dumpOptions:  dumpOpts,
+			checkCommand: checkDumpTest,
+		})
+	})
+
+	t.Run("pattern", func(t *testing.T) {
+		dumpOpts := core.DumpOptions{
+			Compressor:      &compression.GzipCompressor{},
+			Compact:         false,
+			FilenamePattern: "backup-{{ .Sequence }}-{{ .Subsequence }}.tgz",
+		}
+		runTest(t, testOptions{
+			targets:      []string{"/full-backups/"},
+			dc:           dc,
+			base:         base,
+			backupData:   backupData,
+			mysql:        mysql,
+			smb:          smb,
+			s3:           s3,
+			s3backend:    s3backend,
+			dumpOptions:  dumpOpts,
+			checkCommand: checkDumpTest,
+		})
 	})
 
 	// test targets
@@ -864,16 +938,30 @@ func TestIntegration(t *testing.T) {
 		if err := os.Setenv("AWS_SECRET_ACCESS_KEY", "1234567"); err != nil {
 			t.Fatalf("failed to set AWS_SECRET_ACCESS_KEY: %v", err)
 		}
-		runTest(t, dc, false, []string{
-			"/backups/",
-			"file:///backups/",
-			"smb://smb/noauth/",
-			"smb://user:pass@smb/auth",
-			"smb://CONF;user:pass@smb/auth",
-			fmt.Sprintf("s3://%s/", bucketName),
-			"file:///backups/ file:///backups/",
-		}, base, true, backupData, mysql, smb, s3, s3backend, checkDumpTest)
+		dumpOpts := core.DumpOptions{
+			Compressor: &compression.GzipCompressor{},
+			Compact:    false,
+		}
+		runTest(t, testOptions{
+			targets: []string{
+				"/backups/",
+				"file:///backups/",
+				"smb://smb/noauth/",
+				"smb://user:pass@smb/auth",
+				"smb://CONF;user:pass@smb/auth",
+				fmt.Sprintf("s3://%s/", bucketName),
+				"file:///backups/ file:///backups/",
+			},
+			dc:           dc,
+			base:         base,
+			backupData:   backupData,
+			mysql:        mysql,
+			smb:          smb,
+			s3:           s3,
+			s3backend:    s3backend,
+			dumpOptions:  dumpOpts,
+			checkCommand: checkDumpTest,
+		})
 	})
 
-	})
 }
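
Not part of the diff: a minimal sketch of how a caller might consume the new Dump signature introduced above. The types and calls (DumpResults, UploadResult, ProcessFilenamePattern, Storage.Clean, DumpOptions fields) come from this diff; the module import paths, the standalone main package, and the connection values are assumptions for illustration only. Dump now returns a DumpResults whose Uploads entries record, per target, the remote filename after that target's Clean rewrite, which can be compared against ProcessFilenamePattern exactly as the updated runTest does.

// Sketch only (not part of the diff): import paths are inferred from the
// file paths in this diff and may differ; credentials and host are placeholders.
package main

import (
	"fmt"

	"github.com/databacker/mysql-backup/pkg/compression"
	"github.com/databacker/mysql-backup/pkg/core"
	"github.com/databacker/mysql-backup/pkg/database"
	"github.com/databacker/mysql-backup/pkg/storage"
	"github.com/databacker/mysql-backup/pkg/storage/credentials"
	log "github.com/sirupsen/logrus"
)

func main() {
	// ParseURL returns a storage.Storage; its new Clean method owns any
	// per-protocol filename rewriting (e.g. ":" -> "-" for SMB).
	target, err := storage.ParseURL("file:///backups/", credentials.Creds{})
	if err != nil {
		log.Fatalf("invalid target url: %v", err)
	}

	executor := &core.Executor{}
	executor.SetLogger(log.New())

	compressor := &compression.GzipCompressor{}
	results, err := executor.Dump(core.DumpOptions{
		Targets:    []storage.Storage{target},
		DBConn:     database.Connection{User: "root", Pass: "secret", Host: "localhost", Port: 3306}, // placeholders
		Compressor: compressor,
	})
	if err != nil {
		log.Fatalf("error running dump: %v", err)
	}

	// Each upload records the cleaned remote filename; it should match the
	// processed filename pattern, mirroring the check added to runTest.
	for i, upload := range results.Uploads {
		expected, err := core.ProcessFilenamePattern("", results.Time, results.Timestamp, compressor.Extension())
		if err != nil {
			log.Fatalf("failed to process filename pattern: %v", err)
		}
		fmt.Printf("upload %d to %s: %s (expected %s), took %v\n",
			i, upload.Target, upload.Filename, target.Clean(expected), upload.End.Sub(upload.Start))
	}
}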