diff --git a/.github/workflows/ci.yaml b/.github/workflows/ci.yaml index 444e7301..9fc73280 100644 --- a/.github/workflows/ci.yaml +++ b/.github/workflows/ci.yaml @@ -24,6 +24,21 @@ jobs: steps: - name: checkout uses: actions/checkout@v3 + - uses: actions/setup-go@v3 + with: + go-version: '1.19' + - name: golangci-lint + uses: golangci/golangci-lint-action@v3 + with: + version: latest + - name: Build + run: go build -o dist/mysql-backup -v . + - name: vet + run: make vet + - name: Test + run: make test + - name: Integration Test + run: make integration_test - name: Set up QEMU uses: docker/setup-qemu-action@v2 - name: Set up Docker Buildx diff --git a/.github/workflows/release.yaml b/.github/workflows/release.yaml index 82476363..489fe385 100644 --- a/.github/workflows/release.yaml +++ b/.github/workflows/release.yaml @@ -31,12 +31,24 @@ jobs: with: username: ${{ secrets.DOCKER_USERNAME }} password: ${{ secrets.DOCKER_PASSWORD }} - - name: Build and push - id: docker_build + - name: Docker meta + id: meta + uses: docker/metadata-action@v5 + with: + # list of Docker images to use as base name for tags + images: | + ${{env.IMAGE_NAME}} + # generate Docker tags based on the following events/attributes + # for any semver, including rc and alpha, etc. take the tag as is + # for ones that are exactly X.Y.Z, also tag it as latest + tags: | + type=semver,pattern={{version}} + type=semver,pattern={{major}}.{{minor}}.{{patch}},value=latest + - name: Build and push semver tag + id: docker_build_push_semver uses: docker/build-push-action@v2 with: push: true platforms: linux/amd64,linux/arm64 tags: | - ${{env.IMAGE_NAME}}:${{ github.ref_name }} - ${{env.IMAGE_NAME}}:latest + ${{ steps.meta.outputs.tags }} diff --git a/.gitignore b/.gitignore new file mode 100644 index 00000000..f4816cd5 --- /dev/null +++ b/.gitignore @@ -0,0 +1,2 @@ +dist/ +tmp/ diff --git a/Dockerfile b/Dockerfile index b897f628..93187c88 100644 --- a/Dockerfile +++ b/Dockerfile @@ -1,25 +1,23 @@ # mysql backup image -FROM alpine:3.17 -LABEL org.opencontainers.image.authors="https://github.com/deitch" +FROM golang:1.19.6-alpine3.17 as build + +COPY . /src/mysql-backup +WORKDIR /src/mysql-backup -# install the necessary client -# the mysql-client must be 10.3.15 or later -RUN apk add --update 'mariadb-client>10.3.15' mariadb-connector-c bash python3 py3-pip samba-client shadow openssl coreutils && \ - rm -rf /var/cache/apk/* && \ - touch /etc/samba/smb.conf && \ - pip3 install awscli +RUN mkdir /out && go build -o /out/mysql-backup . 
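# Note on the multi-stage build: the golang stage above only compiles /out/mysql-backup;
# the stage below defines the much smaller runtime image that ships just that binary.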
+ +# we would do from scratch, but we need basic utilities in order to support pre/post scripts +FROM alpine:3.17 +LABEL org.opencontainers.image.authors="https://github.com/databacker" # set us up to run as non-root user -RUN groupadd -g 1005 appuser && \ - useradd -r -u 1005 -g appuser appuser -# add home directory for user so IRSA AWS auth works -RUN mkdir -p /home/appuser && chmod 0755 /home/appuser && chown appuser /home/appuser -# ensure smb stuff works correctly -RUN mkdir -p /var/cache/samba && chmod 0755 /var/cache/samba && chown appuser /var/cache/samba && chown appuser /var/lib/samba/private +RUN apk add bash +RUN addgroup -g 1005 appuser && \ + adduser -u 1005 -G appuser -D appuser USER appuser -# install the entrypoint -COPY functions.sh / +COPY --from=build /out/mysql-backup /mysql-backup + COPY entrypoint /entrypoint # start diff --git a/Makefile b/Makefile index 155a9773..94e3cfed 100644 --- a/Makefile +++ b/Makefile @@ -13,19 +13,19 @@ push: build docker tag $(BUILDIMAGE) $(TARGET) docker push $(TARGET) -test_dump: - cd test && DEBUG=$(DEBUG) ./test_dump.sh +integration_test: + go test -v ./test --tags=integration -test_cron: - cd test && ./test_cron.sh +integration_test_debug: + dlv --wd=./test test ./test --build-flags="-tags=integration" -test_source_target: - cd test && ./test_source_target.sh +vet: + go vet --tags=integration ./... -test_restore: - cd test && ./test_restore.sh +test: unit_test integration_test -test: test_dump test_restore test_cron test_source_target +unit_test: + go test -v ./... .PHONY: clean-test-stop clean-test-remove clean-test clean-test-stop: @@ -39,15 +39,5 @@ clean-test-remove: $(eval IDS:=$(shell docker ps -a --filter label=mysqltest -q)) @if [ -n "$(IDS)" ]; then docker rm $(IDS); fi @echo - @echo Remove Volumes - $(eval IDS:=$(shell docker volume ls --filter label=mysqltest -q)) - @if [ -n "$(IDS)" ]; then docker volume rm $(IDS); fi - @echo - -clean-test-network: - @echo Remove Networks - $(eval IDS:=$(shell docker network ls --filter label=mysqltest -q)) - @if [ -n "$(IDS)" ]; then docker network rm $(IDS); fi - @echo -clean-test: clean-test-stop clean-test-remove clean-test-network +clean-test: clean-test-stop clean-test-remove diff --git a/README-bash.md b/README-bash.md new file mode 100644 index 00000000..e886817c --- /dev/null +++ b/README-bash.md @@ -0,0 +1,381 @@ +# mysql-backup +Back up mysql databases to... anywhere! + +## Overview +mysql-backup is a simple way to do MySQL database backups and restores when the database is running in a container. + +It has the following features: + +* dump and restore +* dump to local filesystem or to SMB server +* select database user and password +* connect to any container running on the same system +* select how often to run a dump +* select when to start the first dump, whether time of day or relative to container start time + +Please see [CONTRIBUTORS.md](./CONTRIBUTORS.md) for a list of contributors. + +## Support + +Support is available at the [databack Slack channel](http://databack.slack.com); register [here](https://join.slack.com/t/databack/shared_invite/zt-1cnbo2zfl-0dQS895icOUQy31RAruf7w). We accept issues here and general support questions on Slack. + +## Backup +To run a backup, launch `mysql-backup` image as a container with the correct parameters. Everything is controlled by environment variables passed to the container. 
+ +For example: + +````bash +docker run -d --restart=always -e DB_DUMP_FREQ=60 -e DB_DUMP_BEGIN=2330 -e DB_DUMP_TARGET=/db -e DB_SERVER=my-db-container -v /local/file/path:/db databack/mysql-backup +```` + +The above will run a dump every 60 minutes, beginning at the next 2330 local time, from the database accessible in the container `my-db-container`. + +The following are the environment variables for a backup: + +__You should consider the [use of `--env-file=`](https://docs.docker.com/engine/reference/commandline/run/#set-environment-variables-e-env-env-file), [docker secrets](https://docs.docker.com/engine/swarm/secrets/) to keep your secrets out of your shell history__ + +* `DB_SERVER`: hostname to connect to database. Required. +* `DB_PORT`: port to use to connect to database. Optional, defaults to `3306` +* `DB_USER`: username for the database +* `DB_PASS`: password for the database +* `DB_NAMES`: names of databases to dump (separated by space); defaults to all databases in the database server +* `DB_NAMES_EXCLUDE`: names of databases (separated by space) to exclude from the dump; `information_schema`. `performance_schema`, `sys` and `mysql` are excluded by default. This only applies if `DB_DUMP_BY_SCHEMA` is set to `true`. For example, if you set `DB_NAMES_EXCLUDE=database1 db2` and `DB_DUMP_BY_SCHEMA=true` then these two databases will not be dumped by mysqldump +* `SINGLE_DATABASE`: If is set to `true`, mysqldump command will run without `--databases` flag. This avoid `USE ;` statement which is useful for the cases in which you want to import the dumpfile into a database with a different name. +* `DB_DUMP_FREQ`: How often to do a dump, in minutes. Defaults to 1440 minutes, or once per day. +* `DB_DUMP_BEGIN`: What time to do the first dump. Defaults to immediate. Must be in one of two formats: + * Absolute: HHMM, e.g. `2330` or `0415` + * Relative: +MM, i.e. how many minutes after starting the container, e.g. `+0` (immediate), `+10` (in 10 minutes), or `+90` in an hour and a half +* `DB_DUMP_CRON`: Set the dump schedule using standard [crontab syntax](https://en.wikipedia.org/wiki/Cron), a single line. +* `RUN_ONCE`: Run the backup once and exit if `RUN_ONCE` is set. Useful if you use an external scheduler (e.g. as part of an orchestration solution like Cattle or Docker Swarm or [kubernetes cron jobs](https://kubernetes.io/docs/concepts/workloads/controllers/cron-jobs/)) and don't want the container to do the scheduling internally. If you use this option, all other scheduling options, like `DB_DUMP_FREQ` and `DB_DUMP_BEGIN` and `DB_DUMP_CRON`, become obsolete. +* `DB_DUMP_DEBUG`: If set to `true`, print copious shell script messages to the container log. Otherwise only basic messages are printed. +* `DB_DUMP_TARGET`: Where to put the dump file, should be a directory. Supports four formats: + * Local: If the value of `DB_DUMP_TARGET` starts with a `/` character, will dump to a local path, which should be volume-mounted. + * SMB: If the value of `DB_DUMP_TARGET` is a URL of the format `smb://hostname/share/path/` then it will connect via SMB. + * S3: If the value of `DB_DUMP_TARGET` is a URL of the format `s3://bucketname/path` then it will connect via awscli. + * Multiple: If the value of `DB_DUMP_TARGET` contains multiple targets, the targets should be separated by a whitespace **and** the value surrounded by quotes, e.g. `"/db s3://bucketname/path"`. +* `DB_DUMP_SAFECHARS`: The dump filename usually includes the character `:` in the date, to comply with RFC3339. 
Some systems and shells don't like that character. If this environment variable is set, it will replace all `:` with `-`. +* `AWS_ACCESS_KEY_ID`: AWS Key ID +* `AWS_SECRET_ACCESS_KEY`: AWS Secret Access Key +* `AWS_DEFAULT_REGION`: Region in which the bucket resides +* `AWS_ENDPOINT_URL`: Specify an alternative endpoint for s3 interopable systems e.g. Digitalocean +* `AWS_CLI_OPTS`: Additional arguments to be passed to the `aws` part of the `aws s3 cp` command, click [here](https://docs.aws.amazon.com/cli/latest/reference/#options) for a list. _Be careful_, as you can break something! +* `AWS_CLI_S3_CP_OPTS`: Additional arguments to be passed to the `s3 cp` part of the `aws s3 cp` command, click [here](https://docs.aws.amazon.com/cli/latest/reference/s3/cp.html#options) for a list. If you are using AWS KMS, `sse`, `sse-kms-key-id`, etc., may be of interest. +* `SMB_USER`: SMB username. May also be specified in `DB_DUMP_TARGET` with an `smb://` url. If both specified, this variable overrides the value in the URL. +* `SMB_PASS`: SMB password. May also be specified in `DB_DUMP_TARGET` with an `smb://` url. If both specified, this variable overrides the value in the URL. +* `COMPRESSION`: Compression to use. Supported are: `gzip` (default), `bzip2` +* `DB_DUMP_BY_SCHEMA`: Whether to use separate files per schema in the compressed file (`true`), or a single dump file (`false`). Defaults to `false`. +* `DB_DUMP_KEEP_PERMISSIONS`: Whether to keep permissions for a file target. By default, `mysql-backup` copies the backup compressed file to the target with `cp -a`. In certain filesystems with certain permissions, this may cause errors. You can disable the `-a` flag by setting `DB_DUMP_KEEP_PERMISSIONS=false`. Defaults to `true`. +* `MYSQLDUMP_OPTS`: A string of options to pass to `mysqldump`, e.g. `MYSQLDUMP_OPTS="--opt abc --param def --max_allowed_packet=123455678"` will run `mysqldump --opt abc --param def --max_allowed_packet=123455678` +* `NICE`: true to perform mysqldump with ionice and nice option:- check for more information :- http://eosrei.net/articles/2013/03/forcing-mysqldump-always-be-nice-cpu-and-io +* `TMP_PATH`: tmp directory to be used during backup creation and other operations. Optional, defaults to `/tmp` + +### Scheduling +There are several options for scheduling how often a backup should run: + +* `RUN_ONCE`: run just once and exit. +* `DB_DUMP_FREQ` and `DB_DUMP_BEGIN`: run every x minutes, and run the first one at a particular time. +* `DB_DUMP_CRON`: run on a schedule. + +#### Cron Scheduling +If a cron-scheduled backup takes longer than the beginning of the next backup window, it will be skipped. For example, if your cron line is scheduled to backup every hour, as follows: + +``` +0 * * * * +``` + +And the backup that runs at 13:00 finishes at 14:05, the next backup will not be immediate, but rather at 15:00. + +The cron algorithm is as follows: after each backup run, calculate the next time that the cron statement will be true and schedule the backup then. + +#### Order of Priority +The scheduling options have an order of priority: + +1. `RUN_ONCE` runs once, immediately, and exits, ignoring everything else. +2. `DB_DUMP_CRON`: runs according to the cron schedule, ignoring `DB_DUMP_FREQ` and `DB_DUMP_BEGIN`. +3. `DB_DUMP_FREQ` and `DB_DUMP_BEGIN`: if nothing else is set. + + + +### Permissions +By default, the backup/restore process does **not** run as root (UID O). Whenever possible, you should run processes (not just in containers) as users other than root. 
In this case, it runs as username `appuser` with UID/GID `1005`. + +In most scenarios, this will not affect your backup process negatively. However, if you are using the "Local" dump target, i.e. your `DB_DUMP_TARGET` starts with `/` - and, most likely, is a volume mounted into the container - you can run into permissions issues. For example, if your mounted directory is owned by root on the host, then the backup process will be unable to write to it. + +In this case, you have two options: + +* Run the container as root, `docker run --user 0 ... ` or, in i`docker-compose.yml`, `user: "0"` +* Ensure your mounted directory is writable as UID or GID `1005`. + + +### Database Container +In order to perform the actual dump, `mysql-backup` needs to connect to the database container. You **must** pass the database hostname - which can be another container or any database process accessible from the backup container - by passing the environment variable `DB_SERVER` with the hostname or IP address of the database. You **may** override the default port of `3306` by passing the environment variable `DB_PORT`. + +````bash +docker run -d --restart=always -e DB_USER=user123 -e DB_PASS=pass123 -e DB_DUMP_FREQ=60 -e DB_DUMP_BEGIN=2330 -e DB_DUMP_TARGET=/db -e DB_SERVER=my-db-container -v /local/file/path:/db databack/mysql-backup +```` + +### Dump Target + +The dump target is where you want the backup files to be saved. The backup file *always* is a compressed file the following format: + +`db_backup_YYYY-MM-DDTHH:mm:ssZ.` + +Where the date is RFC3339 date format, excluding the milliseconds portion. + +* YYYY = year in 4 digits +* MM = month number from 01-12 +* DD = date for 01-31 +* HH = hour from 00-23 +* mm = minute from 00-59 +* ss = seconds from 00-59 +* T = literal character `T`, indicating the separation between date and time portions +* Z = literal character `Z`, indicating that the time provided is UTC, or "Zulu" +* compression = appropriate file ending for selected compression, one of: `gz` (gzip, default); `bz2` (bzip2) + +The time used is UTC time at the moment the dump begins. + +Notes on format: + +* SMB does not allow for `:` in a filename (depending on server options), so they are replaced with the `-` character when writing to SMB. +* Some shells do not handle a `:` in the filename gracefully. Although these usually are legitimate characters as far as the _filesystem_ is concerned, your shell may not like it. To avoid this issue, you can set the "no-colons" options with the environment variable `DB_DUMP_SAFECHARS` + +The dump target is the location where the dump should be placed, defaults to `/backup` in the container. Of course, having the backup in the container does not help very much, so we very strongly recommend you volume mount it outside somewhere. See the above example. + +If you use a URL like `smb://host/share/path`, you can have it save to an SMB server. If you need loging credentials, use `smb://user:pass@host/share/path`. + +Note that for smb, if the username includes a domain, e.g. your user is `mydom\myuser`, then you should use the samb convention of replacing the '\' with a ';'. In other words `smb://mydom;myuser:pass@host/share/path` + +If you use a URL like `s3://bucket/path`, you can have it save to an S3 bucket. 
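For example, a dump to S3 might look like the following; the bucket name, region, and credentials here are placeholders, combining the variables documented above:

````bash
docker run -d --restart=always \
  -e DB_SERVER=my-db-container -e DB_USER=user123 -e DB_PASS=pass123 \
  -e AWS_ACCESS_KEY_ID=awskeyid -e AWS_SECRET_ACCESS_KEY=awssecret \
  -e AWS_DEFAULT_REGION=eu-central-1 \
  -e DB_DUMP_TARGET=s3://bucketname/path \
  databack/mysql-backup
````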
+ +Note that for s3, you'll need to specify your AWS credentials and default AWS region via `AWS_ACCESS_KEY_ID`, `AWS_SECRET_ACCESS_KEY` and `AWS_DEFAULT_REGION` + +Also note that if you are using an s3 interopable storage system like DigitalOcean you can use that as the target by setting `AWS_ENDPOINT_URL` to `${REGION_NAME}.digitaloceanspaces.com` and setting `DB_DUMP_TARGET` to `s3://bucketname/path`. + +#### Custom backup source file name +There may be use-cases where you need to modify the source path of the backup file **before** it gets uploaded to the dump target. +An example is combining multiple compressed files into one and giving it a new name, i.e. ```db-other-files-combined.tar.gz```. +To do that, place an executable file called `source.sh` in the following path: + + /scripts.d/source.sh + +Whatever your script returns to _stdout_ will be used as the source name for the backup file. + +The following exported environment variables will be available to the script above: + +* `DUMPFILE`: full path in the container to the output file +* `NOW`: date of the backup, as included in `DUMPFILE` and given by `date -u +"%Y-%m-%dT%H:%M:%SZ"` +* `DUMPDIR`: path to the destination directory so for example you can copy a new tarball including some other files along with the sql dump. +* `DB_DUMP_DEBUG`: To enable debug mode in post-backup scripts. + +**Example run:** + + NOW=20180930151304 DUMPFILE=/tmp/backups/db_backup_201809301513.gz DUMPDIR=/backup DB_DUMP_DEBUG=true /scripts.d/source.sh + +**Example custom source script:** + +```bash + #!/bin/bash + + # Rename source file + echo -n "db-plus-wordpress_${NOW}.gz" +``` + +#### Custom backup target file name +There may be use-cases where you need to modify the target upload path of the backup file **before** it gets uploaded. +An example is uploading a backup to a date stamped object key path in S3, i.e. ```s3://bucket/2018/08/23/path```. +To do that, place an executable file called ```target.sh``` in the following path: + + /scripts.d/target.sh + +Whatever your script returns to _stdout_ will be used as the name for the backup file. + +The following exported environment variables will be available to the script above: + +* `DUMPFILE`: full path in the container to the output file +* `NOW`: date of the backup, as included in `DUMPFILE` and given by `date -u +"%Y-%m-%dT%H:%M:%SZ"` +* `DUMPDIR`: path to the destination directory so for example you can copy a new tarball including some other files along with the sql dump. +* `DB_DUMP_DEBUG`: To enable debug mode in post-backup scripts. + +**Example run:** + + NOW=20180930151304 DUMPFILE=/tmp/backups/db_backup_201809301513.gz DUMPDIR=/backup DB_DUMP_DEBUG=true /scripts.d/target.sh + +**Example custom target script:** + +```bash + #!/bin/bash + + # Rename target file + echo -n "db-plus-wordpress-uploaded_${NOW}.gz" +``` + +### Backup pre and post processing + +Any executable script with _.sh_ extension in _/scripts.d/pre-backup/_ or _/scripts.d/post-backup/_ directories in the container will be executed before +and after the backup dump process has finished respectively, but **before** +uploading the backup file to its ultimate target. This is useful if you need to +include some files along with the database dump, for example, to backup a +_WordPress_ install. + +To use them you need to add a host volume that points to the post-backup scripts in the docker host. 
Start the container like this: + +````bash +docker run -d --restart=always -e DB_USER=user123 -e DB_PASS=pass123 -e DB_DUMP_FREQ=60 \ + -e DB_DUMP_BEGIN=2330 -e DB_DUMP_TARGET=/db -e DB_SERVER=my-db-container:db \ + -v /path/to/pre-backup/scripts:/scripts.d/pre-backup \ + -v /path/to/post-backup/scripts:/scripts.d/post-backup \ + -v /local/file/path:/db \ + databack/mysql-backup +```` + +Or, if you prefer compose: + +```yml +version: '2.1' +services: + backup: + image: databack/mysql-backup + restart: always + volumes: + - /local/file/path:/db + - /path/to/pre-backup/scripts:/scripts.d/pre-backup + - /path/to/post-backup/scripts:/scripts.d/post-backup + env: + - DB_DUMP_TARGET=/db + - DB_USER=user123 + - DB_PASS=pass123 + - DB_DUMP_FREQ=60 + - DB_DUMP_BEGIN=2330 + - DB_SERVER=mysql_db + mysql_db: + image: mysql + .... +``` + +The scripts are _executed_ in the [entrypoint](https://github.com/databack/mysql-backup/blob/master/entrypoint) script, which means it has access to all exported environment variables. The following are available, but we are happy to export more as required (just open an issue or better yet, a pull request): + +* `DUMPFILE`: full path in the container to the output file +* `NOW`: date of the backup, as included in `DUMPFILE` and given by `date -u +"%Y-%m-%dT%H:%M:%SZ"` +* `DUMPDIR`: path to the destination directory so for example you can copy a new tarball including some other files along with the sql dump. +* `DB_DUMP_DEBUG`: To enable debug mode in post-backup scripts. + +In addition, all of the environment variables set for the container will be available to the script. + +For example, the following script will rename the backup file after the dump is done: + +````bash +#!/bin/bash +# Rename backup file. +if [[ -n "$DB_DUMP_DEBUG" ]]; then + set -x +fi + +if [ -e ${DUMPFILE} ]; +then + now=$(date +"%Y-%m-%d-%H_%M") + new_name=db_backup-${now}.gz + old_name=$(basename ${DUMPFILE}) + echo "Renaming backup file from ${old_name} to ${new_name}" + mv ${DUMPFILE} ${DUMPDIR}/${new_name} +else + echo "ERROR: Backup file ${DUMPFILE} does not exist!" +fi + +```` + +You can think of this as a sort of basic plugin system. Look at the source of the [entrypoint](https://github.com/databack/mysql-backup/blob/master/entrypoint) script for other variables that can be used. + +### Encrypting the Backup + +Post-processing also give you options to encrypt the backup using openssl. The openssl binary is available +to the processing scripts. + +The sample [examples/encrypt.sh](./examples/encrypt.sh) provides a sample post-processing script that you can use +to encrypt your backup with AES256. + +## Restore +### Dump Restore +If you wish to run a restore to an existing database, you can use mysql-backup to do a restore. + +You need only the following environment variables: + +__You should consider the [use of `--env-file=`](https://docs.docker.com/engine/reference/commandline/run/#set-environment-variables-e-env-env-file) to keep your secrets out of your shell history__ + +* `DB_SERVER`: hostname to connect to database. Required. +* `DB_PORT`: port to use to connect to database. Optional, defaults to `3306` +* `DB_USER`: username for the database +* `DB_PASS`: password for the database +* `DB_NAMES`: name of database to restore to. Required if `SINGLE_DATABASE=true`, otherwise has no effect. Although the name is plural, it must contain exactly one database name. +* `SINGLE_DATABASE`: If is set to `true`, `DB_NAMES` is required and mysql command will run with `--database=$DB_NAMES` flag. 
This avoids the need of `USE ;` statement, which is useful when restoring from a file saved with `SINGLE_DATABASE` set to `true`. +* `DB_RESTORE_TARGET`: path to the actual restore file, which should be a compressed dump file. The target can be an absolute path, which should be volume mounted, an smb or S3 URL, similar to the target. +* `DB_DUMP_DEBUG`: if `true`, dump copious outputs to the container logs while restoring. +* To use the S3 driver `AWS_ACCESS_KEY_ID`, `AWS_SECRET_ACCESS_KEY` and `AWS_DEFAULT_REGION` will need to be defined. + + +Examples: + +1. Restore from a local file: `docker run -e DB_SERVER=gotodb.example.com -e DB_USER=user123 -e DB_PASS=pass123 -e DB_RESTORE_TARGET=/backup/db_backup_201509271627.gz -v /local/path:/backup databack/mysql-backup` +2. Restore from an SMB file: `docker run -e DB_SERVER=gotodb.example.com -e DB_USER=user123 -e DB_PASS=pass123 -e DB_RESTORE_TARGET=smb://smbserver/share1/backup/db_backup_201509271627.gz databack/mysql-backup` +3. Restore from an S3 file: `docker run -e DB_SERVER=gotodb.example.com -e AWS_ACCESS_KEY_ID=awskeyid -e AWS_SECRET_ACCESS_KEY=secret -e AWS_DEFAULT_REGION=eu-central-1 -e DB_USER=user123 -e DB_PASS=pass123 -e DB_RESTORE_TARGET=s3://bucket/path/db_backup_201509271627.gz databack/mysql-backup` + +### Restore when using docker-compose +`docker-compose` automagically creates a network when started. `docker run` simply attaches to the bridge network. If you are trying to communicate with a mysql container started by docker-compose, you'll need to specify the network in your command arguments. You can use `docker network ls` to see what network is being used, or you can declare a network in your docker-compose.yml. + +#### Example: +`docker run -e DB_SERVER=gotodb.example.com -e DB_USER=user123 -e DB_PASS=pass123 -e DB_RESTORE_TARGET=/backup/db_backup_201509271627.gz -v /local/path:/backup --network="skynet" databack/mysql-backup` + +### Using docker (or rancher) secrets +Environment variables used in this image can be passed in files as well. This is useful when you are using docker (or rancher) secrets for storing sensitive information. + +As you can set environment variable with `-e ENVIRONMENT_VARIABLE=value`, you can also use `-e ENVIRONMENT_VARIABLE_FILE=/path/to/file`. Contents of that file will be assigned to the environment variable. + +**Example:** + +```bash +docker run -d \ + -e DB_HOST_FILE=/run/secrets/DB_HOST \ + -e DB_USER_FILE=/run/secrets/DB_USER \ + -e DB_PASS_FILE=/run/secrets/DB_PASS \ + -v /local/file/path:/db \ + databack/mysql-backup +``` + +### Restore pre and post processing + +As with backups pre and post processing, you can do the same with restore operations. +Any executable script with _.sh_ extension in _/scripts.d/pre-restore/_ or +_/scripts.d/post-restore/_ directories in the container will be executed before the restore process starts and after it finishes respectively. This is useful if you need to +restore a backup file that includes some files along with the database dump. + +For example, to restore a _WordPress_ install, you would uncompress a tarball containing +the db backup and a second tarball with the contents of a WordPress install on +`pre-restore`. Then on `post-restore`, uncompress the WordPress files on the container's web server root directory. + +For an example take a look at the post-backup examples, all variables defined for post-backup scripts are available for pre-processing too. 
Also don't forget to add the same host volumes for `pre-restore` and `post-restore` directories as described for post-backup processing. + +### Automated Build +This github repo is the source for the mysql-backup image. The actual image is stored on the docker hub at `databack/mysql-backup`, and is triggered with each commit to the source by automated build via Webhooks. + +There are 2 builds: 1 for version based on the git tag, and another for the particular version number. + +## Tests + +The tests all run in docker containers, to avoid the need to install anything other than `make` and `docker`, and even can run over remote docker connections, avoiding any local bind-mounts. To run all tests: + +``` +make test +``` + +To run with debugging + +``` +make test DEBUG=debug +``` + +The above will generate _copious_ outputs, so you might want to redirect stdout and stderr to a file. + +This runs each of the several testing targets, each of which is a script in `test/test_*.sh`, which sets up tests, builds containers, runs the tests, and collects the output. + +## License +Released under the MIT License. +Copyright Avi Deitcher https://github.com/deitch diff --git a/README.md b/README.md index 36680e26..0648b03b 100644 --- a/README.md +++ b/README.md @@ -1,8 +1,10 @@ # mysql-backup + Back up mysql databases to... anywhere! ## Overview -mysql-backup is a simple way to do MySQL database backups and restores when the database is running in a container. + +mysql-backup is a simple way to do MySQL database backups and restores. It has the following features: @@ -15,286 +17,69 @@ It has the following features: Please see [CONTRIBUTORS.md](./CONTRIBUTORS.md) for a list of contributors. -## Support - -Support is available at the [databack Slack channel](http://databack.slack.com); register [here](https://join.slack.com/t/databack/shared_invite/zt-1cnbo2zfl-0dQS895icOUQy31RAruf7w). We accept issues here and general support questions on Slack. - -## Backup -To run a backup, launch `mysql-backup` image as a container with the correct parameters. Everything is controlled by environment variables passed to the container. - -For example: - -````bash -docker run -d --restart=always -e DB_DUMP_FREQ=60 -e DB_DUMP_BEGIN=2330 -e DB_DUMP_TARGET=/db -e DB_SERVER=my-db-container -v /local/file/path:/db databack/mysql-backup -```` - -The above will run a dump every 60 minutes, beginning at the next 2330 local time, from the database accessible in the container `my-db-container`. - -The following are the environment variables for a backup: - -__You should consider the [use of `--env-file=`](https://docs.docker.com/engine/reference/commandline/run/#set-environment-variables-e-env-env-file), [docker secrets](https://docs.docker.com/engine/swarm/secrets/) to keep your secrets out of your shell history__ - -* `DB_SERVER`: hostname to connect to database. Required. -* `DB_PORT`: port to use to connect to database. Optional, defaults to `3306` -* `DB_USER`: username for the database -* `DB_PASS`: password for the database -* `DB_NAMES`: names of databases to dump (separated by space); defaults to all databases in the database server -* `DB_NAMES_EXCLUDE`: names of databases (separated by space) to exclude from the dump; `information_schema`. `performance_schema`, `sys` and `mysql` are excluded by default. This only applies if `DB_DUMP_BY_SCHEMA` is set to `true`. 
For example, if you set `DB_NAMES_EXCLUDE=database1 db2` and `DB_DUMP_BY_SCHEMA=true` then these two databases will not be dumped by mysqldump -* `SINGLE_DATABASE`: If is set to `true`, mysqldump command will run without `--databases` flag. This avoid `USE ;` statement which is useful for the cases in which you want to import the dumpfile into a database with a different name. -* `DB_DUMP_FREQ`: How often to do a dump, in minutes. Defaults to 1440 minutes, or once per day. -* `DB_DUMP_BEGIN`: What time to do the first dump. Defaults to immediate. Must be in one of two formats: - * Absolute: HHMM, e.g. `2330` or `0415` - * Relative: +MM, i.e. how many minutes after starting the container, e.g. `+0` (immediate), `+10` (in 10 minutes), or `+90` in an hour and a half -* `DB_DUMP_CRON`: Set the dump schedule using standard [crontab syntax](https://en.wikipedia.org/wiki/Cron), a single line. -* `RUN_ONCE`: Run the backup once and exit if `RUN_ONCE` is set. Useful if you use an external scheduler (e.g. as part of an orchestration solution like Cattle or Docker Swarm or [kubernetes cron jobs](https://kubernetes.io/docs/concepts/workloads/controllers/cron-jobs/)) and don't want the container to do the scheduling internally. If you use this option, all other scheduling options, like `DB_DUMP_FREQ` and `DB_DUMP_BEGIN` and `DB_DUMP_CRON`, become obsolete. -* `DB_DUMP_DEBUG`: If set to `true`, print copious shell script messages to the container log. Otherwise only basic messages are printed. -* `DB_DUMP_TARGET`: Where to put the dump file, should be a directory. Supports four formats: - * Local: If the value of `DB_DUMP_TARGET` starts with a `/` character, will dump to a local path, which should be volume-mounted. - * SMB: If the value of `DB_DUMP_TARGET` is a URL of the format `smb://hostname/share/path/` then it will connect via SMB. - * S3: If the value of `DB_DUMP_TARGET` is a URL of the format `s3://bucketname/path` then it will connect via awscli. - * Multiple: If the value of `DB_DUMP_TARGET` contains multiple targets, the targets should be separated by a whitespace **and** the value surrounded by quotes, e.g. `"/db s3://bucketname/path"`. -* `DB_DUMP_SAFECHARS`: The dump filename usually includes the character `:` in the date, to comply with RFC3339. Some systems and shells don't like that character. If this environment variable is set, it will replace all `:` with `-`. -* `AWS_ACCESS_KEY_ID`: AWS Key ID -* `AWS_SECRET_ACCESS_KEY`: AWS Secret Access Key -* `AWS_DEFAULT_REGION`: Region in which the bucket resides -* `AWS_ENDPOINT_URL`: Specify an alternative endpoint for s3 interopable systems e.g. Digitalocean -* `AWS_CLI_OPTS`: Additional arguments to be passed to the `aws` part of the `aws s3 cp` command, click [here](https://docs.aws.amazon.com/cli/latest/reference/#options) for a list. _Be careful_, as you can break something! -* `AWS_CLI_S3_CP_OPTS`: Additional arguments to be passed to the `s3 cp` part of the `aws s3 cp` command, click [here](https://docs.aws.amazon.com/cli/latest/reference/s3/cp.html#options) for a list. If you are using AWS KMS, `sse`, `sse-kms-key-id`, etc., may be of interest. -* `SMB_USER`: SMB username. May also be specified in `DB_DUMP_TARGET` with an `smb://` url. If both specified, this variable overrides the value in the URL. -* `SMB_PASS`: SMB password. May also be specified in `DB_DUMP_TARGET` with an `smb://` url. If both specified, this variable overrides the value in the URL. -* `COMPRESSION`: Compression to use. 
Supported are: `gzip` (default), `bzip2` -* `DB_DUMP_BY_SCHEMA`: Whether to use separate files per schema in the compressed file (`true`), or a single dump file (`false`). Defaults to `false`. -* `DB_DUMP_KEEP_PERMISSIONS`: Whether to keep permissions for a file target. By default, `mysql-backup` copies the backup compressed file to the target with `cp -a`. In certain filesystems with certain permissions, this may cause errors. You can disable the `-a` flag by setting `DB_DUMP_KEEP_PERMISSIONS=false`. Defaults to `true`. -* `MYSQLDUMP_OPTS`: A string of options to pass to `mysqldump`, e.g. `MYSQLDUMP_OPTS="--opt abc --param def --max_allowed_packet=123455678"` will run `mysqldump --opt abc --param def --max_allowed_packet=123455678` -* `NICE`: true to perform mysqldump with ionice and nice option:- check for more information :- http://eosrei.net/articles/2013/03/forcing-mysqldump-always-be-nice-cpu-and-io -* `TMP_PATH`: tmp directory to be used during backup creation and other operations. Optional, defaults to `/tmp` - -### Scheduling -There are several options for scheduling how often a backup should run: - -* `RUN_ONCE`: run just once and exit. -* `DB_DUMP_FREQ` and `DB_DUMP_BEGIN`: run every x minutes, and run the first one at a particular time. -* `DB_DUMP_CRON`: run on a schedule. - -#### Cron Scheduling -If a cron-scheduled backup takes longer than the beginning of the next backup window, it will be skipped. For example, if your cron line is scheduled to backup every hour, as follows: - -``` -0 * * * * -``` - -And the backup that runs at 13:00 finishes at 14:05, the next backup will not be immediate, but rather at 15:00. - -The cron algorithm is as follows: after each backup run, calculate the next time that the cron statement will be true and schedule the backup then. +## Versions -#### Order of Priority -The scheduling options have an order of priority: - -1. `RUN_ONCE` runs once, immediately, and exits, ignoring everything else. -2. `DB_DUMP_CRON`: runs according to the cron schedule, ignoring `DB_DUMP_FREQ` and `DB_DUMP_BEGIN`. -3. `DB_DUMP_FREQ` and `DB_DUMP_BEGIN`: if nothing else is set. +This is the latest version, based on the complete rebuild of the codebase for 1.0.0 release based on +golang, completed in late 2023. The README for versions prior to 1.0.0, based on bash, is available +[here](./README-bash.md). +## Support +Support is available at the [databack Slack channel](http://databack.slack.com); register [here](https://join.slack.com/t/databack/shared_invite/zt-1cnbo2zfl-0dQS895icOUQy31RAruf7w). We accept issues here and general support questions on Slack. -### Permissions -By default, the backup/restore process does **not** run as root (UID O). Whenever possible, you should run processes (not just in containers) as users other than root. In this case, it runs as username `appuser` with UID/GID `1005`. +If you are interested in commercial support, please contact us via Slack above. -In most scenarios, this will not affect your backup process negatively. However, if you are using the "Local" dump target, i.e. your `DB_DUMP_TARGET` starts with `/` - and, most likely, is a volume mounted into the container - you can run into permissions issues. For example, if your mounted directory is owned by root on the host, then the backup process will be unable to write to it. +## Running `mysql-backup` -In this case, you have two options: +`mysql-backup` is available both as a single standalone binary, and as a container image. 
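For instance, the two forms can be obtained roughly as follows; the image name is the one used throughout these docs, and the build command mirrors the CI workflow (it assumes a local Go toolchain and a checkout of this repository):

````bash
# run as a container
docker pull databack/mysql-backup

# or build the standalone binary from source
go build -o dist/mysql-backup .
````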
-* Run the container as root, `docker run --user 0 ... ` or, in i`docker-compose.yml`, `user: "0"` -* Ensure your mounted directory is writable as UID or GID `1005`. +## Backup +To run a backup, launch `mysql-backup` - as a container or as a binary - with the correct parameters. -### Database Container -In order to perform the actual dump, `mysql-backup` needs to connect to the database container. You **must** pass the database hostname - which can be another container or any database process accessible from the backup container - by passing the environment variable `DB_SERVER` with the hostname or IP address of the database. You **may** override the default port of `3306` by passing the environment variable `DB_PORT`. +For example: ````bash -docker run -d --restart=always -e DB_USER=user123 -e DB_PASS=pass123 -e DB_DUMP_FREQ=60 -e DB_DUMP_BEGIN=2330 -e DB_DUMP_TARGET=/db -e DB_SERVER=my-db-container -v /local/file/path:/db databack/mysql-backup -```` - -### Dump Target - -The dump target is where you want the backup files to be saved. The backup file *always* is a compressed file the following format: - -`db_backup_YYYY-MM-DDTHH:mm:ssZ.` - -Where the date is RFC3339 date format, excluding the milliseconds portion. - -* YYYY = year in 4 digits -* MM = month number from 01-12 -* DD = date for 01-31 -* HH = hour from 00-23 -* mm = minute from 00-59 -* ss = seconds from 00-59 -* T = literal character `T`, indicating the separation between date and time portions -* Z = literal character `Z`, indicating that the time provided is UTC, or "Zulu" -* compression = appropriate file ending for selected compression, one of: `gz` (gzip, default); `bz2` (bzip2) - -The time used is UTC time at the moment the dump begins. - -Notes on format: - -* SMB does not allow for `:` in a filename (depending on server options), so they are replaced with the `-` character when writing to SMB. -* Some shells do not handle a `:` in the filename gracefully. Although these usually are legitimate characters as far as the _filesystem_ is concerned, your shell may not like it. To avoid this issue, you can set the "no-colons" options with the environment variable `DB_DUMP_SAFECHARS` - -The dump target is the location where the dump should be placed, defaults to `/backup` in the container. Of course, having the backup in the container does not help very much, so we very strongly recommend you volume mount it outside somewhere. See the above example. - -If you use a URL like `smb://host/share/path`, you can have it save to an SMB server. If you need loging credentials, use `smb://user:pass@host/share/path`. - -Note that for smb, if the username includes a domain, e.g. your user is `mydom\myuser`, then you should use the samb convention of replacing the '\' with a ';'. In other words `smb://mydom;myuser:pass@host/share/path` - -If you use a URL like `s3://bucket/path`, you can have it save to an S3 bucket. - -Note that for s3, you'll need to specify your AWS credentials and default AWS region via `AWS_ACCESS_KEY_ID`, `AWS_SECRET_ACCESS_KEY` and `AWS_DEFAULT_REGION` - -Also note that if you are using an s3 interopable storage system like DigitalOcean you can use that as the target by setting `AWS_ENDPOINT_URL` to `${REGION_NAME}.digitaloceanspaces.com` and setting `DB_DUMP_TARGET` to `s3://bucketname/path`. - -#### Custom backup source file name -There may be use-cases where you need to modify the source path of the backup file **before** it gets uploaded to the dump target. 
-An example is combining multiple compressed files into one and giving it a new name, i.e. ```db-other-files-combined.tar.gz```. -To do that, place an executable file called `source.sh` in the following path: - - /scripts.d/source.sh - -Whatever your script returns to _stdout_ will be used as the source name for the backup file. - -The following exported environment variables will be available to the script above: - -* `DUMPFILE`: full path in the container to the output file -* `NOW`: date of the backup, as included in `DUMPFILE` and given by `date -u +"%Y-%m-%dT%H:%M:%SZ"` -* `DUMPDIR`: path to the destination directory so for example you can copy a new tarball including some other files along with the sql dump. -* `DB_DUMP_DEBUG`: To enable debug mode in post-backup scripts. +docker run -d --restart=always -e DB_DUMP_FREQ=60 -e DB_DUMP_BEGIN=2330 -e DB_DUMP_TARGET=/local/file/path -e DB_SERVER=my-db-address -v /local/file/path:/db databack/mysql-backup -**Example run:** +# or - NOW=20180930151304 DUMPFILE=/tmp/backups/db_backup_201809301513.gz DUMPDIR=/backup DB_DUMP_DEBUG=true /scripts.d/source.sh - -**Example custom source script:** - -```bash - #!/bin/bash - - # Rename source file - echo -n "db-plus-wordpress_${NOW}.gz" -``` - -#### Custom backup target file name -There may be use-cases where you need to modify the target upload path of the backup file **before** it gets uploaded. -An example is uploading a backup to a date stamped object key path in S3, i.e. ```s3://bucket/2018/08/23/path```. -To do that, place an executable file called ```target.sh``` in the following path: - - /scripts.d/target.sh - -Whatever your script returns to _stdout_ will be used as the name for the backup file. - -The following exported environment variables will be available to the script above: - -* `DUMPFILE`: full path in the container to the output file -* `NOW`: date of the backup, as included in `DUMPFILE` and given by `date -u +"%Y-%m-%dT%H:%M:%SZ"` -* `DUMPDIR`: path to the destination directory so for example you can copy a new tarball including some other files along with the sql dump. -* `DB_DUMP_DEBUG`: To enable debug mode in post-backup scripts. - -**Example run:** - - NOW=20180930151304 DUMPFILE=/tmp/backups/db_backup_201809301513.gz DUMPDIR=/backup DB_DUMP_DEBUG=true /scripts.d/target.sh - -**Example custom target script:** +mysql-backup dump --frequency=60 --begin=2330 --target=/local/file/path --server=my-db-address +```` -```bash - #!/bin/bash +Or `mysql-backup --config-file=/path/to/config/file.yaml` where `/path/to/config/file.yaml` is a file +with the following contents: - # Rename target file - echo -n "db-plus-wordpress-uploaded_${NOW}.gz" +```yaml +server: my-db-address +dump: + frequency: 60 + begin: 2330 + target: /local/file/path ``` -### Backup pre and post processing - -Any executable script with _.sh_ extension in _/scripts.d/pre-backup/_ or _/scripts.d/post-backup/_ directories in the container will be executed before -and after the backup dump process has finished respectively, but **before** -uploading the backup file to its ultimate target. This is useful if you need to -include some files along with the database dump, for example, to backup a -_WordPress_ install. - -To use them you need to add a host volume that points to the post-backup scripts in the docker host. Start the container like this: +The above will run a dump every 60 minutes, beginning at the next 2330 local time, from the database accessible in the container `my-db-address`. 
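The same cadence can also be expressed with cron-style scheduling via the `--cron` flag (or `DB_DUMP_CRON`); this is only a rough illustrative equivalent of the hourly example above:

````bash
mysql-backup dump --cron="30 * * * *" --target=/local/file/path --server=my-db-address
````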
````bash -docker run -d --restart=always -e DB_USER=user123 -e DB_PASS=pass123 -e DB_DUMP_FREQ=60 \ - -e DB_DUMP_BEGIN=2330 -e DB_DUMP_TARGET=/db -e DB_SERVER=my-db-container:db \ - -v /path/to/pre-backup/scripts:/scripts.d/pre-backup \ - -v /path/to/post-backup/scripts:/scripts.d/post-backup \ - -v /local/file/path:/db \ - databack/mysql-backup -```` - -Or, if you prefer compose: - -```yml -version: '2.1' -services: - backup: - image: databack/mysql-backup - restart: always - volumes: - - /local/file/path:/db - - /path/to/pre-backup/scripts:/scripts.d/pre-backup - - /path/to/post-backup/scripts:/scripts.d/post-backup - env: - - DB_DUMP_TARGET=/db - - DB_USER=user123 - - DB_PASS=pass123 - - DB_DUMP_FREQ=60 - - DB_DUMP_BEGIN=2330 - - DB_SERVER=mysql_db - mysql_db: - image: mysql - .... -``` - -The scripts are _executed_ in the [entrypoint](https://github.com/databack/mysql-backup/blob/master/entrypoint) script, which means it has access to all exported environment variables. The following are available, but we are happy to export more as required (just open an issue or better yet, a pull request): - -* `DUMPFILE`: full path in the container to the output file -* `NOW`: date of the backup, as included in `DUMPFILE` and given by `date -u +"%Y-%m-%dT%H:%M:%SZ"` -* `DUMPDIR`: path to the destination directory so for example you can copy a new tarball including some other files along with the sql dump. -* `DB_DUMP_DEBUG`: To enable debug mode in post-backup scripts. +docker run -d --restart=always -e DB_USER=user123 -e DB_PASS=pass123 -e DB_DUMP_FREQ=60 -e DB_DUMP_BEGIN=2330 -e DB_DUMP_TARGET=/db -e DB_SERVER=my-db-address -v /local/file/path:/db databack/mysql-backup -In addition, all of the environment variables set for the container will be available to the script. - -For example, the following script will rename the backup file after the dump is done: - -````bash -#!/bin/bash -# Rename backup file. -if [[ -n "$DB_DUMP_DEBUG" ]]; then - set -x -fi - -if [ -e ${DUMPFILE} ]; -then - now=$(date +"%Y-%m-%d-%H_%M") - new_name=db_backup-${now}.gz - old_name=$(basename ${DUMPFILE}) - echo "Renaming backup file from ${old_name} to ${new_name}" - mv ${DUMPFILE} ${DUMPDIR}/${new_name} -else - echo "ERROR: Backup file ${DUMPFILE} does not exist!" -fi +# or +mysql-backup dump --user=user123 --pass=pass123 --frequency=60 --begin=2330 --target=/local/file/path --server=my-db-address --port=3306 ```` -You can think of this as a sort of basic plugin system. Look at the source of the [entrypoint](https://github.com/databack/mysql-backup/blob/master/entrypoint) script for other variables that can be used. +See [backup](./docs/backup.md) for a more detailed description of performing backups. -### Encrypting the Backup +See [configuration](./docs/configuration.md) for a detailed list of all configuration options. -Post-processing also give you options to encrypt the backup using openssl. The openssl binary is available -to the processing scripts. - -The sample [examples/encrypt.sh](./examples/encrypt.sh) provides a sample post-processing script that you can use -to encrypt your backup with AES256. ## Restore + +To perform a restore, you simply run the process in reverse. You still connect to a database, but instead of the +dump command, you pass it the restore command. Instead of a dump target, you pass it a restore target. + ### Dump Restore + If you wish to run a restore to an existing database, you can use mysql-backup to do a restore. 
You need only the following environment variables: @@ -308,17 +93,15 @@ __You should consider the [use of `--env-file=`](https://docs.docker.com/engine/ * `DB_NAMES`: names of databases to restore separated by spaces. Required if `SINGLE_DATABASE=true`. * `SINGLE_DATABASE`: If is set to `true`, `DB_NAMES` is required and must contain exactly one database name. Mysql command will then run with `--database=$DB_NAMES` flag. This avoids the need of `USE ;` statement, which is useful when restoring from a file saved with `SINGLE_DATABASE` set to `true`. * `DB_RESTORE_TARGET`: path to the actual restore file, which should be a compressed dump file. The target can be an absolute path, which should be volume mounted, an smb or S3 URL, similar to the target. -* `RESTORE_OPTS`: A string of options to pass to `mysql` restore command, e.g. `--ssl-cert /certs/client-cert.pem --ssl-key /certs/client-key.pem` will run `mysql --ssl-cert /certs/client-cert.pem --ssl-key /certs/client-key.pem -h $DB_SERVER -P $DB_PORT $DBUSER $DBPASS $DBDATABASE`, default is empty ('') * `DB_DUMP_DEBUG`: if `true`, dump copious outputs to the container logs while restoring. * To use the S3 driver `AWS_ACCESS_KEY_ID`, `AWS_SECRET_ACCESS_KEY` and `AWS_DEFAULT_REGION` will need to be defined. - Examples: 1. Restore from a local file: `docker run -e DB_SERVER=gotodb.example.com -e DB_USER=user123 -e DB_PASS=pass123 -e DB_RESTORE_TARGET=/backup/db_backup_201509271627.gz -v /local/path:/backup databack/mysql-backup` 2. Restore from a local file using ssl: `docker run -e DB_SERVER=gotodb.example.com -e DB_USER=user123 -e DB_PASS=pass123 -e DB_RESTORE_TARGET=/backup/db_backup_201509271627.gz -e RESTORE_OPTS="--ssl-cert /certs/client-cert.pem --ssl-key /certs/client-key.pem" -v /local/path:/backup -v /local/certs:/certs databack/mysql-backup` -3. Restore from an SMB file: `docker run -e DB_SERVER=gotodb.example.com -e DB_USER=user123 -e DB_PASS=pass123 -e DB_RESTORE_TARGET=smb://smbserver/share1/backup/db_backup_201509271627.gz databack/mysql-backup` -4. Restore from an S3 file: `docker run -e DB_SERVER=gotodb.example.com -e AWS_ACCESS_KEY_ID=awskeyid -e AWS_SECRET_ACCESS_KEY=secret -e AWS_DEFAULT_REGION=eu-central-1 -e DB_USER=user123 -e DB_PASS=pass123 -e DB_RESTORE_TARGET=s3://bucket/path/db_backup_201509271627.gz databack/mysql-backup` +2. Restore from an SMB file: `docker run -e DB_SERVER=gotodb.example.com -e DB_USER=user123 -e DB_PASS=pass123 -e DB_RESTORE_TARGET=smb://smbserver/share1/backup/db_backup_201509271627.gz databack/mysql-backup` +3. Restore from an S3 file: `docker run -e DB_SERVER=gotodb.example.com -e AWS_ACCESS_KEY_ID=awskeyid -e AWS_SECRET_ACCESS_KEY=secret -e AWS_REGION=eu-central-1 -e DB_USER=user123 -e DB_PASS=pass123 -e DB_RESTORE_TARGET=s3://bucket/path/db_backup_201509271627.gz databack/mysql-backup` ### Restore specific databases If you have multiple schemas in your database, you can choose to restore only some of them. @@ -335,63 +118,9 @@ When doing this, schemas will be restored with their original name. 
To restore u * `docker run -e DB_SERVER=gotodb.example.com -e DB_USER=user123 -e DB_PASS=pass123 -e SINGLE_DATABASE=true -e DB_NAMES=database1 -v /local/path:/backup databack/mysql-backup` * `docker run -e DB_SERVER=gotodb.example.com -e DB_USER=user123 -e DB_PASS=pass123 -e DB_RESTORE_TARGET=/backup/db_backup_201509271627.gz -e SINGLE_DATABASE=true DB_NAMES=newdatabase1 -v /local/path:/backup databack/mysql-backup` -### Restore when using docker-compose -`docker-compose` automagically creates a network when started. `docker run` simply attaches to the bridge network. If you are trying to communicate with a mysql container started by docker-compose, you'll need to specify the network in your command arguments. You can use `docker network ls` to see what network is being used, or you can declare a network in your docker-compose.yml. - -#### Example: -`docker run -e DB_SERVER=gotodb.example.com -e DB_USER=user123 -e DB_PASS=pass123 -e DB_RESTORE_TARGET=/backup/db_backup_201509271627.gz -v /local/path:/backup --network="skynet" databack/mysql-backup` - -### Using docker (or rancher) secrets -Environment variables used in this image can be passed in files as well. This is useful when you are using docker (or rancher) secrets for storing sensitive information. - -As you can set environment variable with `-e ENVIRONMENT_VARIABLE=value`, you can also use `-e ENVIRONMENT_VARIABLE_FILE=/path/to/file`. Contents of that file will be assigned to the environment variable. - -**Example:** - -```bash -docker run -d \ - -e DB_HOST_FILE=/run/secrets/DB_HOST \ - -e DB_USER_FILE=/run/secrets/DB_USER \ - -e DB_PASS_FILE=/run/secrets/DB_PASS \ - -v /local/file/path:/db \ - databack/mysql-backup -``` - -### Restore pre and post processing - -As with backups pre and post processing, you can do the same with restore operations. -Any executable script with _.sh_ extension in _/scripts.d/pre-restore/_ or -_/scripts.d/post-restore/_ directories in the container will be executed before the restore process starts and after it finishes respectively. This is useful if you need to -restore a backup file that includes some files along with the database dump. - -For example, to restore a _WordPress_ install, you would uncompress a tarball containing -the db backup and a second tarball with the contents of a WordPress install on -`pre-restore`. Then on `post-restore`, uncompress the WordPress files on the container's web server root directory. - -For an example take a look at the post-backup examples, all variables defined for post-backup scripts are available for pre-processing too. Also don't forget to add the same host volumes for `pre-restore` and `post-restore` directories as described for post-backup processing. - -### Automated Build -This github repo is the source for the mysql-backup image. The actual image is stored on the docker hub at `databack/mysql-backup`, and is triggered with each commit to the source by automated build via Webhooks. - -There are 2 builds: 1 for version based on the git tag, and another for the particular version number. - -## Tests - -The tests all run in docker containers, to avoid the need to install anything other than `make` and `docker`, and even can run over remote docker connections, avoiding any local bind-mounts. To run all tests: - -``` -make test -``` - -To run with debugging - -``` -make test DEBUG=debug -``` - -The above will generate _copious_ outputs, so you might want to redirect stdout and stderr to a file. 
+See [restore](./docs/restore.md) for a more detailed description of performing restores. -This runs each of the several testing targets, each of which is a script in `test/test_*.sh`, which sets up tests, builds containers, runs the tests, and collects the output. +See [configuration](./docs/configuration.md) for a detailed list of all configuration options. ## License Released under the MIT License. diff --git a/TODO.md b/TODO.md new file mode 100644 index 00000000..5a2eca69 --- /dev/null +++ b/TODO.md @@ -0,0 +1,8 @@ +# Golang TODO + +* parsing non-transferred cmdline options + +## Non-transferred cmdline options + +* `AWS_CLI_OPTS`: Additional arguments to be passed to the `aws` part of the `aws s3 cp` command, click [here](https://docs.aws.amazon.com/cli/latest/reference/#options) for a list. _Be careful_, as you can break something! +* `AWS_CLI_S3_CP_OPTS`: Additional arguments to be passed to the `s3 cp` part of the `aws s3 cp` command, click [here](https://docs.aws.amazon.com/cli/latest/reference/s3/cp.html#options) for a list. If you are using AWS KMS, `sse`, `sse-kms-key-id`, etc., may be of interest. diff --git a/cmd/common_test.go b/cmd/common_test.go new file mode 100644 index 00000000..425cfd2f --- /dev/null +++ b/cmd/common_test.go @@ -0,0 +1,28 @@ +package cmd + +import ( + "github.com/databacker/mysql-backup/pkg/compression" + "github.com/databacker/mysql-backup/pkg/core" + "github.com/databacker/mysql-backup/pkg/database" + "github.com/databacker/mysql-backup/pkg/storage" + "github.com/stretchr/testify/mock" +) + +type mockExecs struct { + mock.Mock +} + +func newMockExecs() *mockExecs { + m := &mockExecs{} + return m +} + +func (m *mockExecs) timerDump(opts core.DumpOptions, timerOpts core.TimerOptions) error { + args := m.Called(opts, timerOpts) + return args.Error(0) +} + +func (m *mockExecs) restore(target storage.Storage, targetFile string, dbconn database.Connection, databasesMap map[string]string, compressor compression.Compressor) error { + args := m.Called(target, targetFile, dbconn, databasesMap, compressor) + return args.Error(0) +} diff --git a/cmd/dump.go b/cmd/dump.go new file mode 100644 index 00000000..2e4a5235 --- /dev/null +++ b/cmd/dump.go @@ -0,0 +1,207 @@ +package cmd + +import ( + "fmt" + "strings" + + log "github.com/sirupsen/logrus" + "github.com/spf13/cobra" + "github.com/spf13/viper" + + "github.com/databacker/mysql-backup/pkg/core" + "github.com/databacker/mysql-backup/pkg/storage" +) + +const ( + defaultCompression = "gzip" + defaultBegin = "+0" + defaultFrequency = 1440 + defaultMaxAllowedPacket = 4194304 +) + +func dumpCmd(execs execs) (*cobra.Command, error) { + var v *viper.Viper + var cmd = &cobra.Command{ + Use: "dump", + Aliases: []string{"backup"}, + Short: "backup a database", + Long: `Backup a database to a target location, once or on a schedule. + Can choose to dump all databases, only some by name, or all but excluding some. 
+ The databases "information_schema", "performance_schema", "sys" and "mysql" are + excluded by default, unless you explicitly list them.`, + PreRun: func(cmd *cobra.Command, args []string) { + bindFlags(cmd, v) + }, + RunE: func(cmd *cobra.Command, args []string) error { + log.Debug("starting dump") + // check targets + targetURLs := v.GetStringSlice("target") + var ( + targets []storage.Storage + err error + ) + if len(targetURLs) > 0 { + for _, t := range targetURLs { + store, err := storage.ParseURL(t, creds) + if err != nil { + return fmt.Errorf("invalid target url: %v", err) + } + targets = append(targets, store) + } + } else { + // try the config file + if configuration != nil { + // parse the target objects, then the ones listed for the backup + targetStructures := configuration.Targets + dumpTargets := configuration.Dump.Targets + for _, t := range dumpTargets { + var store storage.Storage + if target, ok := targetStructures[t]; !ok { + return fmt.Errorf("target %s from dump configuration not found in targets configuration", t) + } else { + store, err = target.Storage() + if err != nil { + return fmt.Errorf("target %s from dump configuration has invalid URL: %v", t, err) + } + } + targets = append(targets, store) + } + } + } + if len(targets) == 0 { + return fmt.Errorf("no targets specified") + } + safechars := v.GetBool("safechars") + if !v.IsSet("safechars") && configuration != nil { + safechars = configuration.Dump.Safechars + } + include := v.GetStringSlice("include") + if len(include) == 0 && configuration != nil { + include = configuration.Dump.Include + } + exclude := v.GetStringSlice("exclude") + if len(exclude) == 0 && configuration != nil { + exclude = configuration.Dump.Exclude + } + preBackupScripts := v.GetString("pre-backup-scripts") + if preBackupScripts == "" && configuration != nil { + preBackupScripts = configuration.Dump.Scripts.PreBackup + } + noDatabaseName := v.GetBool("no-database-name") + if !v.IsSet("no-database-name") && configuration != nil { + noDatabaseName = configuration.Dump.NoDatabaseName + } + compact := v.GetBool("compact") + if !v.IsSet("compact") && configuration != nil { + compact = configuration.Dump.Compact + } + maxAllowedPacket := v.GetInt("max-allowed-packet") + if !v.IsSet("max-allowed-packet") && configuration != nil { + maxAllowedPacket = configuration.Dump.MaxAllowedPacket + } + + dumpOpts := core.DumpOptions{ + Targets: targets, + Safechars: safechars, + DBNames: include, + DBConn: dbconn, + Compressor: compressor, + Exclude: exclude, + PreBackupScripts: preBackupScripts, + PostBackupScripts: preBackupScripts, + SuppressUseDatabase: noDatabaseName, + Compact: compact, + MaxAllowedPacket: maxAllowedPacket, + } + + // timer options + once := v.GetBool("once") + if !v.IsSet("once") && configuration != nil { + once = configuration.Dump.Schedule.Once + } + cron := v.GetString("cron") + if cron == "" && configuration != nil { + cron = configuration.Dump.Schedule.Cron + } + begin := v.GetString("begin") + if begin == "" && configuration != nil { + begin = configuration.Dump.Schedule.Begin + } + frequency := v.GetInt("frequency") + if frequency == 0 && configuration != nil { + frequency = configuration.Dump.Schedule.Frequency + } + timerOpts := core.TimerOptions{ + Once: once, + Cron: cron, + Begin: begin, + Frequency: frequency, + } + dump := core.TimerDump + if execs != nil { + dump = execs.timerDump + } + if err := dump(dumpOpts, timerOpts); err != nil { + return err + } + log.Info("Backup complete") + return nil + }, + } + + v = 
viper.New() + v.SetEnvPrefix("db_dump") + v.SetEnvKeyReplacer(strings.NewReplacer("-", "_")) + v.AutomaticEnv() + + flags := cmd.Flags() + // target - where the backup is to be saved + flags.StringSlice("target", []string{}, `full URL target to where the backups should be saved. Should be a directory. Accepts multiple targets. Supports three formats: +Local: If if starts with a "/" character of "file:///", will dump to a local path, which should be volume-mounted. +SMB: If it is a URL of the format smb://hostname/share/path/ then it will connect via SMB. +S3: If it is a URL of the format s3://bucketname/path then it will connect via S3 protocol.`) + if err := cmd.MarkFlagRequired("target"); err != nil { + return nil, err + } + + // include - include of databases to back up + flags.StringSlice("include", []string{}, "names of databases to dump; empty to do all") + + // exclude + flags.StringSlice("exclude", []string{}, "databases to exclude from the dump.") + + // single database, do not include `USE database;` in dump + flags.Bool("no-database-name", false, "Omit `USE ;` in the dump, so it can be restored easily to a different database.") + + // frequency + flags.Int("frequency", defaultFrequency, "how often to run backups, in minutes") + + // begin + flags.String("begin", defaultBegin, "What time to do the first dump. Must be in one of two formats: Absolute: HHMM, e.g. `2330` or `0415`; or Relative: +MM, i.e. how many minutes after starting the container, e.g. `+0` (immediate), `+10` (in 10 minutes), or `+90` in an hour and a half") + + // cron + flags.String("cron", "", "Set the dump schedule using standard [crontab syntax](https://en.wikipedia.org/wiki/Cron), a single line.") + + // once + flags.Bool("once", false, "Override all other settings and run the dump once immediately and exit. Useful if you use an external scheduler (e.g. as part of an orchestration solution like Cattle or Docker Swarm or [kubernetes cron jobs](https://kubernetes.io/docs/concepts/workloads/controllers/cron-jobs/)) and don't want the container to do the scheduling internally.") + + // safechars + flags.Bool("safechars", false, "The dump filename usually includes the character `:` in the date, to comply with RFC3339. Some systems and shells don't like that character. If true, will replace all `:` with `-`.") + + // compression + flags.String("compression", defaultCompression, "Compression to use. Supported are: `gzip`, `bzip2`") + + // source filename pattern + flags.String("filename-pattern", "db_backup_{{ .now }}.{{ .compression }}", "Pattern to use for filename in target. See documentation.") + + // pre-backup scripts + flags.String("pre-backup-scripts", "", "Directory wherein any file ending in `.sh` will be run pre-backup.") + + // post-backup scripts + flags.String("post-backup-scripts", "", "Directory wherein any file ending in `.sh` will be run post-backup but pre-send to target.") + + // max-allowed-packet size + flags.Int("max-allowed-packet", defaultMaxAllowedPacket, "Maximum size of the buffer for client/server communication, similar to mysqldump's max_allowed_packet. 
0 means to use the default size.") + + return cmd, nil +} diff --git a/cmd/dump_test.go b/cmd/dump_test.go new file mode 100644 index 00000000..32e7fd3b --- /dev/null +++ b/cmd/dump_test.go @@ -0,0 +1,72 @@ +package cmd + +import ( + "net/url" + "testing" + + "github.com/databacker/mysql-backup/pkg/core" + "github.com/databacker/mysql-backup/pkg/storage" + "github.com/databacker/mysql-backup/pkg/storage/file" + "github.com/go-test/deep" + "github.com/stretchr/testify/mock" +) + +func TestDumpCmd(t *testing.T) { + t.Parallel() + + fileTarget := "file:///foo/bar" + fileTargetURL, _ := url.Parse(fileTarget) + tests := []struct { + name string + args []string // "restore" will be prepended automatically + config string + wantErr bool + expectedDumpOptions core.DumpOptions + expectedTimerOptions core.TimerOptions + }{ + {"missing server and target options", []string{""}, "", true, core.DumpOptions{}, core.TimerOptions{}}, + {"invalid target URL", []string{"--server", "abc", "--target", "def"}, "", true, core.DumpOptions{}, core.TimerOptions{}}, + {"file URL", []string{"--server", "abc", "--target", "file:///foo/bar"}, "", false, core.DumpOptions{ + Targets: []storage.Storage{file.New(*fileTargetURL)}, + MaxAllowedPacket: defaultMaxAllowedPacket, + DBNames: []string{}, + Exclude: []string{}, + }, core.TimerOptions{Frequency: defaultFrequency, Begin: defaultBegin}}, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + m := newMockExecs() + m.On("timerDump", mock.MatchedBy(func(dumpOpts core.DumpOptions) bool { + diff := deep.Equal(dumpOpts, tt.expectedDumpOptions) + if diff == nil { + return true + } + t.Errorf("dumpOpts compare failed: %v", diff) + return false + }), mock.MatchedBy(func(timerOpts core.TimerOptions) bool { + diff := deep.Equal(timerOpts, tt.expectedTimerOptions) + if diff == nil { + return true + } + t.Errorf("timerOpts compare failed: %v", diff) + return false + })).Return(nil) + + cmd, err := rootCmd(m) + if err != nil { + t.Fatal(err) + } + cmd.SetArgs(append([]string{"dump"}, tt.args...)) + err = cmd.Execute() + switch { + case err == nil && tt.wantErr: + t.Fatal("missing error") + case err != nil && !tt.wantErr: + t.Fatal(err) + case err == nil: + m.AssertExpectations(t) + } + }) + } +} diff --git a/cmd/restore.go b/cmd/restore.go new file mode 100644 index 00000000..a9fcdff0 --- /dev/null +++ b/cmd/restore.go @@ -0,0 +1,108 @@ +package cmd + +import ( + "fmt" + "strings" + + log "github.com/sirupsen/logrus" + "github.com/spf13/cobra" + "github.com/spf13/viper" + + "github.com/databacker/mysql-backup/pkg/core" + "github.com/databacker/mysql-backup/pkg/storage" + "github.com/databacker/mysql-backup/pkg/util" +) + +func restoreCmd(execs execs) (*cobra.Command, error) { + var v *viper.Viper + var cmd = &cobra.Command{ + Use: "restore", + Short: "restore a dump", + Long: `Restore a database dump from a given location.`, + PreRun: func(cmd *cobra.Command, args []string) { + bindFlags(cmd, v) + }, + Args: cobra.MinimumNArgs(1), + RunE: func(cmd *cobra.Command, args []string) error { + log.Debug("starting restore") + targetFile := args[0] + target := v.GetString("target") + // get databases namesand mappings + databasesMap := make(map[string]string) + databases := strings.TrimSpace(v.GetString("database")) + if databases != "" { + for _, db := range strings.Split(databases, ",") { + parts := strings.SplitN(db, ":", 2) + if len(parts) != 2 { + return fmt.Errorf("invalid database mapping: %s", db) + } + databasesMap[parts[0]] = parts[1] + } + } + // target URL 
can reference one from the config file, or an absolute one + // if it's not in the config file, it's an absolute one + // if it is in the config file, it's a reference to one of the targets in the config file + u, err := util.SmartParse(target) + if err != nil { + return fmt.Errorf("invalid target url: %v", err) + } + var store storage.Storage + if u.Scheme == "config" { + // get the target name + targetName := u.Host + // get the target from the config file + if configuration == nil { + return fmt.Errorf("no configuration file found") + } + if target, ok := configuration.Targets[targetName]; !ok { + return fmt.Errorf("target %s not found in configuration", targetName) + } else { + if store, err = target.Storage(); err != nil { + return fmt.Errorf("error creating storage for target %s: %v", targetName, err) + } + } + // need to add the path to the specific target file + } else { + // parse the target URL + store, err = storage.ParseURL(target, creds) + if err != nil { + return fmt.Errorf("invalid target url: %v", err) + } + } + restore := core.Restore + if execs != nil { + restore = execs.restore + } + if err := restore(store, targetFile, dbconn, databasesMap, compressor); err != nil { + return fmt.Errorf("error restoring: %v", err) + } + log.Info("Restore complete") + return nil + }, + } + // target - where the backup is + v = viper.New() + v.SetEnvPrefix("db_restore") + v.SetEnvKeyReplacer(strings.NewReplacer("-", "_")) + v.AutomaticEnv() + + flags := cmd.Flags() + flags.String("target", "", "full URL target to the backup that you wish to restore") + if err := cmd.MarkFlagRequired("target"); err != nil { + return nil, err + } + + // compression + flags.String("compression", defaultCompression, "Compression to use. Supported are: `gzip`, `bzip2`") + + // specific database to which to restore + flags.String("database", "", "Mapping of from:to database names to which to restore, comma-separated, e.g. foo:bar,buz:qux. Replaces the `USE ` clauses in a backup file. 
If blank, uses the file as is.") + + // pre-restore scripts + flags.String("pre-restore-scripts", "", "Directory wherein any file ending in `.sh` will be run after retrieving the dump file but pre-restore.") + + // post-restore scripts + flags.String("post-restore-scripts", "", "Directory wherein any file ending in `.sh` will be run post-restore.") + + return cmd, nil +} diff --git a/cmd/restore_test.go b/cmd/restore_test.go new file mode 100644 index 00000000..46aaa049 --- /dev/null +++ b/cmd/restore_test.go @@ -0,0 +1,58 @@ +package cmd + +import ( + "net/url" + "testing" + + "github.com/databacker/mysql-backup/pkg/compression" + "github.com/databacker/mysql-backup/pkg/database" + "github.com/databacker/mysql-backup/pkg/storage" + "github.com/databacker/mysql-backup/pkg/storage/file" +) + +func TestRestoreCmd(t *testing.T) { + t.Parallel() + + fileTarget := "file:///foo/bar" + fileTargetURL, _ := url.Parse(fileTarget) + + tests := []struct { + name string + args []string // "restore" will be prepended automatically + config string + wantErr bool + expectedTarget storage.Storage + expectedTargetFile string + expectedDbconn database.Connection + expectedDatabasesMap map[string]string + expectedCompressor compression.Compressor + }{ + {"missing server and target options", []string{""}, "", true, nil, "", database.Connection{}, nil, &compression.GzipCompressor{}}, + {"invalid target URL", []string{"--server", "abc", "--target", "def"}, "", true, nil, "", database.Connection{}, nil, &compression.GzipCompressor{}}, + {"valid URL missing dump filename", []string{"--server", "abc", "--target", "file:///foo/bar"}, "", true, nil, "", database.Connection{}, nil, &compression.GzipCompressor{}}, + {"valid file URL", []string{"--server", "abc", "--target", fileTarget, "filename.tgz"}, "", false, file.New(*fileTargetURL), "filename.tgz", database.Connection{}, map[string]string{}, nil}, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + m := newMockExecs() + m.On("restore", tt.expectedTarget, tt.expectedTargetFile, tt.expectedDbconn, tt.expectedDatabasesMap, tt.expectedCompressor).Return(nil) + cmd, err := rootCmd(m) + if err != nil { + t.Fatal(err) + } + cmd.SetArgs(append([]string{"restore"}, tt.args...)) + err = cmd.Execute() + switch { + case err == nil && tt.wantErr: + t.Fatal("missing error") + case err != nil && !tt.wantErr: + t.Fatal(err) + case err == nil: + m.AssertExpectations(t) + //m.AssertCalled(t, "restore", tt.expectedTarget, tt.expectedTargetFile, tt.expectedDbconn, tt.expectedDatabasesMap, tt.expectedCompressor) + } + + }) + } +} diff --git a/cmd/root.go b/cmd/root.go new file mode 100644 index 00000000..29b840e4 --- /dev/null +++ b/cmd/root.go @@ -0,0 +1,230 @@ +package cmd + +import ( + "fmt" + "os" + "strings" + + "github.com/databacker/mysql-backup/pkg/compression" + "github.com/databacker/mysql-backup/pkg/config" + "github.com/databacker/mysql-backup/pkg/core" + "github.com/databacker/mysql-backup/pkg/database" + "github.com/databacker/mysql-backup/pkg/storage" + "github.com/databacker/mysql-backup/pkg/storage/credentials" + log "github.com/sirupsen/logrus" + "github.com/spf13/cobra" + "github.com/spf13/pflag" + "github.com/spf13/viper" + "gopkg.in/yaml.v3" +) + +type execs interface { + timerDump(opts core.DumpOptions, timerOpts core.TimerOptions) error + restore(target storage.Storage, targetFile string, dbconn database.Connection, databasesMap map[string]string, compressor compression.Compressor) error +} + +type subCommand func(execs) (*cobra.Command, 
error) + +var subCommands = []subCommand{dumpCmd, restoreCmd} + +const ( + defaultPort = 3306 +) + +var ( + dbconn database.Connection + creds credentials.Creds + compressor compression.Compressor + configuration *config.Config +) + +func rootCmd(execs execs) (*cobra.Command, error) { + var ( + v *viper.Viper + cmd *cobra.Command + ) + cmd = &cobra.Command{ + Use: "mysql-backup", + Short: "backup or restore one or more mysql-compatible databases", + Long: `Backup or restore one or more mysql-compatible databases. + In addition to the provided command-line flag options and environment variables, + when using s3-storage, supports the standard AWS options: + + AWS_ACCESS_KEY_ID: AWS Key ID + AWS_SECRET_ACCESS_KEY: AWS Secret Access Key + AWS_REGION: Region in which the bucket resides + `, + PersistentPreRunE: func(c *cobra.Command, args []string) error { + var err error + bindFlags(cmd, v) + logLevel := v.GetInt("verbose") + switch logLevel { + case 0: + log.SetLevel(log.InfoLevel) + case 1: + log.SetLevel(log.DebugLevel) + case 2: + log.SetLevel(log.TraceLevel) + } + + // read the config file, if needed; the structure of the config differs quite some + // from the necessarily flat env vars/CLI flags, so we can't just use viper's + // automatic config file support. + if configFile := v.GetString("config"); configFile != "" { + var ( + f *os.File + err error + config config.Config + ) + if f, err = os.Open(configFile); err != nil { + return fmt.Errorf("fatal error config file: %w", err) + } + defer f.Close() + decoder := yaml.NewDecoder(f) + if err := decoder.Decode(&config); err != nil { + return fmt.Errorf("fatal error config file: %w", err) + } + configuration = &config + } + + // the structure of our config file is more complex and with relationships than our config/env var + // so we cannot use a single viper structure, as described above. + + // set up database connection + var dbconn database.Connection + + if configuration != nil { + if configuration.Database.Server != "" { + dbconn.Host = configuration.Database.Server + } + if configuration.Database.Port != 0 { + dbconn.Port = configuration.Database.Port + } + if configuration.Database.Credentials.Username != "" { + dbconn.User = configuration.Database.Credentials.Username + } + if configuration.Database.Credentials.Password != "" { + dbconn.Pass = configuration.Database.Credentials.Password + } + } + // override config with env var or CLI flag, if set + dbHost := v.GetString("server") + if dbHost != "" { + dbconn.Host = dbHost + } + dbPort := v.GetInt("port") + if dbPort != 0 { + dbconn.Port = dbPort + } + dbUser := v.GetString("user") + if dbUser != "" { + dbconn.User = dbUser + } + dbPass := v.GetString("pass") + if dbPass != "" { + dbconn.Pass = dbPass + } + + // compression algorithm: check config, then CLI/env var overrides + var compressionAlgo string + if configuration != nil { + compressionAlgo = configuration.Dump.Compression + } + compressionVar := v.GetString("compression") + if compressionVar != "" { + compressionAlgo = compressionVar + } + if compressionAlgo != "" { + compressor, err = compression.GetCompressor(compressionAlgo) + if err != nil { + return fmt.Errorf("failure to get compression '%s': %v", compressionAlgo, err) + } + } + + // these are not from the config file, as they are generic credentials, used across all targets. 
+ // the config file uses specific ones per target + creds = credentials.Creds{ + AWSEndpoint: v.GetString("aws-endpoint-url"), + SMBCredentials: credentials.SMBCreds{ + Username: v.GetString("smb-user"), + Password: v.GetString("smb-pass"), + Domain: v.GetString("smb-domain"), + }, + } + return nil + }, + } + + v = viper.New() + v.SetEnvPrefix("db") + v.SetEnvKeyReplacer(strings.NewReplacer("-", "_")) + v.AutomaticEnv() + + // server hostname via CLI or env var + pflags := cmd.PersistentFlags() + pflags.String("server", "", "hostname for database server") + if err := cmd.MarkPersistentFlagRequired("server"); err != nil { + return nil, err + } + + // base of temporary directory to use + pflags.String("tmp", os.TempDir(), "temporary directory base for working directory, defaults to OS") + + // server port via CLI or env var or default + pflags.Int("port", defaultPort, "port for database server") + + // user via CLI or env var + pflags.String("user", "", "username for database server") + + // pass via CLI or env var + pflags.String("pass", "", "password for database server") + + // debug via CLI or env var or default + pflags.IntP("verbose", "v", 0, "set log level, 1 is debug, 2 is trace") + + // aws options + pflags.String("aws-endpoint-url", "", "Specify an alternative endpoint for s3 interoperable systems e.g. Digitalocean; ignored if not using s3.") + pflags.String("aws-access-key-id", "", "Access Key for s3 and s3 interoperable systems; ignored if not using s3.") + pflags.String("aws-secret-access-key", "", "Secret Access Key for s3 and s3 interoperable systems; ignored if not using s3.") + pflags.String("aws-region", "", "Region for s3 and s3 interoperable systems; ignored if not using s3.") + + // smb options + pflags.String("smb-user", "", "SMB username") + pflags.String("smb-pass", "", "SMB username") + pflags.String("smb-domain", "", "SMB domain") + + for _, subCmd := range subCommands { + if sc, err := subCmd(execs); err != nil { + return nil, err + } else { + cmd.AddCommand(sc) + } + } + + return cmd, nil +} + +// Bind each cobra flag to its associated viper configuration (config file and environment variable) +func bindFlags(cmd *cobra.Command, v *viper.Viper) { + cmd.Flags().VisitAll(func(f *pflag.Flag) { + // Determine the naming convention of the flags when represented in the config file + configName := f.Name + _ = v.BindPFlag(configName, f) + // Apply the viper config value to the flag when the flag is not set and viper has a value + if !f.Changed && v.IsSet(configName) { + val := v.Get(configName) + _ = cmd.Flags().Set(f.Name, fmt.Sprintf("%v", val)) + } + }) +} + +// Execute primary function for cobra +func Execute() { + rootCmd, err := rootCmd(nil) + if err != nil { + log.Fatal(err) + } + if err := rootCmd.Execute(); err != nil { + log.Fatal(err) + } +} diff --git a/docs/backup.md b/docs/backup.md new file mode 100644 index 00000000..3d763bfb --- /dev/null +++ b/docs/backup.md @@ -0,0 +1,365 @@ +# Backing Up + +Backing up is the process of taking backups from your database via `mysql-backup`, and saving the backup file +to a target. That target can be one of: + +* local file +* SMB remote file +* S3 bucket + +## Instructions and Examples for Backup Configuration Options + +### Database Names + +By default, all databases in the database server are backed up, and the system databases +named `information_schema`, `performance_schema`, `sys` and `mysql` are excluded. +This only applies if `DB_DUMP_BY_SCHEMA` is set to `true`. 
For example, if you set `DB_NAMES_EXCLUDE=database1 db2` and `DB_DUMP_BY_SCHEMA=true` then these two databases will not be dumped. + +**Dumping just some databases** + +* Environment variable: `DB_NAMES=db1,db2,db3` +* CLI flag: `--include=db1 --include=db2 --include=db3` +* Config file: +```yaml +dump: + include: + - db1 + - db2 + - db3 +``` + +**Dumping all databases** + +* Environment variable: `DB_NAMES=` +* CLI flag: `--include=` +* Config file: +```yaml +dump: + include: +``` + +Note that you do not need to set those explicitly; these are the defaults for those settings. + +**Dumping all databases except for one** + +* Environment variable: `DB_NAMES_EXCLUDE=notme,notyou` +* CLI flag: `--exclude=notme,notyou` +* Config file: +```yaml +dump: + exclude: + - notme + - notyou +``` + +### No Database Name + +By default, the backup assumes you will restore the dump into a database with the same name as the +one that you backed up. This means it will include the `USE ;` statement in the dump, so +it will switch to the correct database when you restore the dump. + +If you do not want the `USE` statement in the backup file, for example if you might want to restore the dump to a different +database, you need to remove the `USE ;` statement from the dump. `mysql-backup` does this for you when you set: + +* Environment variable: `NO_DATABASE_NAME=true`. +* CLI flag: `--no-database-name=true` +* Config file: +```yaml +dump: + no-database-name: true +``` + +Remember that each database schema will be in its own file, so you can determine the original by looking at the filename. + +### Dump File + +The backup file itself *always* is a compressed file the following format: + +`db_backup_YYYY-MM-DDTHH:mm:ssZ.` + +Where the date is RFC3339 date format, excluding the milliseconds portion. + +* YYYY = year in 4 digits +* MM = month number from 01-12 +* DD = date for 01-31 +* HH = hour from 00-23 +* mm = minute from 00-59 +* ss = seconds from 00-59 +* T = literal character `T`, indicating the separation between date and time portions +* Z = literal character `Z`, indicating that the time provided is UTC, or "Zulu" +* compression = appropriate file ending for selected compression, one of: `gz` (gzip, default); `bz2` (bzip2) + +The time used is UTC time at the moment the dump begins. + +Notes on format: + +* SMB does not allow for `:` in a filename (depending on server options), so they are replaced with the `-` character when writing to SMB. +* Some shells do not handle a `:` in the filename gracefully. Although these usually are legitimate characters as far as the _filesystem_ is concerned, your shell may not like it. To avoid this issue, you can set the "no-colons" options with the "safechars" configuration: + +* Environment variable: `DB_DUMP_SAFECHARS=true` +* CLI flag: `dump --safechars=true` +* Config file: +```yaml +dump: + safechars: true +``` + +### Dump Target + +You set where to put the dump file via configuration. The format is different between using environment variables +and CLI flags, vs config file. The environment variable and CLI support only simple formats, i.e. a single URL for a target. +For more advanced options, such as specific credentials and settings, use the config file. + +#### Environment Variables and CLI + +For example, to set it to a local directory named `/db`: + +* Environment variable: `DB_DUMP_TARGET=/db` +* CLI flag: `dump --target=/db` + +It **must** be a directory. 
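+
+As a quick illustration, a one-off dump to a local directory might look like the sketch below; the server name, credentials and the `/db` path are placeholders for your own values:
+
+```bash
+# dump all databases once to the local directory /db (which must exist and be writable)
+mysql-backup dump \
+  --server=my-db-host \
+  --user=backupuser \
+  --pass=secret \
+  --target=/db \
+  --once
+```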
+ +The value of the environment variable or CLI target can be one of three formats, depending on the type of target: + +* Local: If it starts with a `/` character or `file:///` url, it will dump to a local path. If in a container, you should have it volume-mounted. +* SMB: If it is a URL of the format `smb://hostname/share/path/` then it will connect via SMB. +* S3: If it is a URL of the format `s3://bucketname.fqdn.com/path` then it will connect via using the S3 protocol. + +In addition, you can send to multiple targets by separating them with a whitespace for the environment variable, +or native multiple options for other configuration options. For example, to send to a local directory and an SMB share: + +* Environment variable: `DB_DUMP_TARGET="/db smb://hostname/share/path/"` +* CLI flag: `dump --target=/db --target=smb://hostname/share/path/"` + +##### Local File + +If the target starts with `/` or is a `file:///` then it is assumed to be a directory. The file will be written to that +directory. + +The target **must** be to a directory, wherein the dump file will be saved, using the naming +convention listed above. + +* Environment variable: `DB_DUMP_TARGET=/db` +* CLI flag: `dump --target=/db` + +If running in a container, you will need to ensure that the directory target is mounted. See +[container considerations](./container_considerations.md). + +##### SMB + +If you use a URL that begins with `smb://`, for example `smb://host/share/path`, the dump file will be saved +to an SMB server. + +The full URL **must** be to a directory on the SMB server, wherein the dump file will be saved, using the naming +convention listed above. + +If you need login credentials, you can either use the URL format `smb://user:pass@host/share/path`, +or you can use the SMB user and password options: + +* Environment variable: `SMB_USER=user SMB_PASS=pass` +* CLI flag: `--smb-user=user --smb-pass=pass` + +The explicit credentials in `SMB_USER` and `SMB_PASS` override user and pass values in the URL. + +Note that for smb, if the username includes a domain, e.g. your user is `mydom\myuser`, then you should use the smb convention of replacing the '\' with a ';'. In other words `smb://mydom;myuser:pass@host/share/path` + +##### S3 + +If you use a URL that begins with `s3://`, for example `s3://bucket/path`, the dump file will be saved to the S3 bucket. + +The full URL **must** be to a directory in the S3 bucket, wherein the dump file will be saved, using the naming +convention listed above. + +Note that for s3, you'll need to specify your AWS credentials and default AWS region via the appropriate +settings. + +For example, to set the AWS credentials: + +* Environment variable: `AWS_ACCESS_KEY_ID=accesskey AWS_SECRET_ACCESS_KEY=secretkey AWS_REGION=us-east-1` +* CLI flag: `--aws-access-key-id=accesskey --aws-secret-access-key=secretkey --aws-region=us-east-1` + +If you are using an s3-interoperable storage system like DigitalOcean you will need to +set the AWS endpoint URL via the AWS endpoint URL setting. + +For example, to use Digital Ocean, whose endpoint URL is `${REGION_NAME}.digitaloceanspaces.com`: + +* Environment variable: `AWS_ENDPOINT_URL=https://nyc3.digitaloceanspaces.com` +* CLI flag: `--aws-endpoint-url=https://nyc3.digitaloceanspaces.com` + +Note that if you have multiple S3-compatible backup targets, each with its own set of credentials, region +or endpoint, then you _must_ use the config file. 
There is no way to distinguish between multiple sets of +credentials via the environment variables or CLI flags, while the config file provides credentials for each +target. + +#### Configuration File + +The configuration file is the most flexible way to configure the dump target. It allows you to specify +multiple targets, along with credentials and options for each target. It also keeps credentials in a file, +rather than in the shell history, and makes the command-line much simpler. Finally, of course, it allows you to +track the history of the file. + +In the configuration file, a main section lists all potential targets, along with their configuration. + +```yaml +targets: + s3: + type: s3 + url: s3://bucket.us-west.amazonaws.com/databackup + region: us-west-1 + endpoint: https://s3.us-west-1.amazonaws.com + credentials: + access-key-id: access_key_id + secret-access-key: secret_access_key + file: + type: file + url: file:///tmp/databackup + otherfile: + type: file + url: /tmp/databackup + smbshare: + type: smb + url: smb://cifshost:2125/databackup + credentials: + domain: mydomain + username: user + password: password +``` + +Notice that each section is a key-value, where the key is the unique name for that target. It need not +have any meaning, other than a useful reference to you. For example, one of our targets is named `s3`, +while another is named `otherfile`. + +The uniquely named targets allow you to have separate configuration and credentials. For example, +you can have two distinct s3-compatible targets, each with its own endpoint, region, and credentials. That +would not be possible with the CLI or environment variables, as they rely on the common `AWS_ACCESS_KEY_ID` +environment variable, or its CLI flag equivalent. + +Once the targets are defined, you can reference them in the `dump` section by their unique keyed name: + +```yaml +dump: + targets: + - s3 + - file + - otherfile +``` + + ##### Custom backup file name + +There may be use-cases where you need to modify the name and path of the backup file when it gets uploaded to the dump target. + +For example, if you need the filename not to be `/db_backup_.gz` but perhaps `////mybackup_.gz`. + +To do that, configure the environment variable `DB_DUMP_FILENAME_PATTERN` or its CLI flag or config file equivalent. + +The content is a string that contains a pattern to be used for the filename. The pattern can contain the following placeholders: + +* `{{.now}}` - date of the backup, as included in `{{.dumpfile}}` and given by `date -u +"%Y-%m-%dT%H:%M:%SZ"` +* `{{.year}}` +* `{{.month}}` +* `{{.day}}` +* `{{.hour}}` +* `{{.minute}}` +* `{{.second}}` +* `{{.compression}}` - appropriate extension for the compression used, for example, `.gz` or `.bz2` + +**Example run:** + +``` +mysql-backup dump --source-filename-pattern="db-plus-wordpress_{{.now}}.gz" +``` + +If the execution time was `20180930151304`, then the file will be named `plus-wordpress_20180930151304.gz`. + +### Backup pre and post processing + +`mysql-backup` is capable of running arbitrary scripts for pre-backup and post-backup (but pre-upload) +processing. This is useful if you need to include some files along with the database dump, for example, +to backup a _WordPress_ install. + +In order to execute those scripts, you deposit them in appropriate dedicated directories and +inform `mysql-backup` about the directories. Any file ending in `.sh` in the directory will be executed. 
+ +* When using the binary, set the directories via the environment variable `DB_DUMP_PRE_BACKUP_SCRIPTS` or `DB_DUMP_POST_BACKUP_SCRIPTS`, or their CLI flag or config file equivalents. +* When using the `mysql-backup` container, these are automatically set to the directories `/scripts.d/pre-backup/` and `/scripts.d/post-backup/`, inside the container respectively. It is up to you to mount them. + +**Example run binary:** + +```bash +mysql-backup dump --pre-backup-scripts=/path/to/pre-backup/scripts --post-backup-scripts=/path/to/post-backup/scripts +``` + +**Example run container:** + +```bash +docker run -d --restart=always -e DB_USER=user123 -e DB_PASS=pass123 -e DB_DUMP_FREQ=60 \ + -e DB_DUMP_BEGIN=2330 -e DB_DUMP_TARGET=/db -e DB_SERVER=my-db-container:db \ + -v /path/to/pre-backup/scripts:/scripts.d/pre-backup \ + -v /path/to/post-backup/scripts:/scripts.d/post-backup \ + -v /local/file/path:/db \ + databack/mysql-backup +``` + +Or, if you prefer [docker compose](https://docs.docker.com/compose/): + +```yml +version: '2.1' +services: + backup: + image: databack/mysql-backup + restart: always + volumes: + - /local/file/path:/db + - /path/to/pre-backup/scripts:/scripts.d/pre-backup + - /path/to/post-backup/scripts:/scripts.d/post-backup + env: + - DB_DUMP_TARGET=/db + - DB_USER=user123 + - DB_PASS=pass123 + - DB_DUMP_FREQ=60 + - DB_DUMP_BEGIN=2330 + - DB_SERVER=mysql_db + mysql_db: + image: mysql + .... +``` + +The scripts are _executed_ in the [entrypoint](https://github.com/databack/mysql-backup/blob/master/entrypoint) script, which means it has access to all exported environment variables. The following are available, but we are happy to export more as required (just open an issue or better yet, a pull request): + +* `DUMPFILE`: full path in the container to the output file +* `NOW`: date of the backup, as included in `DUMPFILE` and given by `date -u +"%Y-%m-%dT%H:%M:%SZ"` +* `DUMPDIR`: path to the destination directory so for example you can copy a new tarball including some other files along with the sql dump. +* `DEBUG`: To enable debug mode in post-backup scripts. + +In addition, all of the environment variables set for the container will be available to the script. + +For example, the following script will rename the backup file after the dump is done: + +````bash +#!/bin/bash +# Rename backup file. +if [[ -n "$DEBUG" ]]; then + set -x +fi + +if [ -e ${DUMPFILE} ]; +then + now=$(date +"%Y-%m-%d-%H_%M") + new_name=db_backup-${now}.gz + old_name=$(basename ${DUMPFILE}) + echo "Renaming backup file from ${old_name} to ${new_name}" + mv ${DUMPFILE} ${DUMPDIR}/${new_name} +else + echo "ERROR: Backup file ${DUMPFILE} does not exist!" +fi + +```` + +### Encrypting the Backup + +Post-processing gives you options to encrypt the backup using openssl or any other tools. You will need to have it +available on your system. When running in the `mysql-backup` container, the openssl binary is available +to the processing scripts. + +The sample [examples/encrypt.sh](./examples/encrypt.sh) provides a sample post-processing script that you can use +to encrypt your backup with AES256. 
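+
+If you prefer to write your own, a minimal post-backup encryption script might look like the sketch below. It is an illustration only, not the bundled `examples/encrypt.sh`; it assumes the `DUMPFILE` variable described above and a passphrase file mounted at a hypothetical path `/secrets/backup-passphrase`:
+
+```bash
+#!/bin/bash
+# Encrypt the dump with AES-256-CBC and replace the original file with the encrypted one.
+set -e
+
+PASSFILE=/secrets/backup-passphrase   # hypothetical mount point for the passphrase file
+
+if [ -e "${DUMPFILE}" ]; then
+  openssl enc -aes-256-cbc -salt -pbkdf2 \
+    -pass "file:${PASSFILE}" \
+    -in "${DUMPFILE}" -out "${DUMPFILE}.enc"
+  mv "${DUMPFILE}.enc" "${DUMPFILE}"
+else
+  echo "ERROR: Backup file ${DUMPFILE} does not exist!"
+fi
+```
+
+You would mount this script into `/scripts.d/post-backup/` (or point `--post-backup-scripts` at its directory) along with the passphrase file; the exact hand-off to the uploader may differ, so treat it as a starting point rather than a drop-in solution.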
diff --git a/docs/configuration.md b/docs/configuration.md new file mode 100644 index 00000000..69548c69 --- /dev/null +++ b/docs/configuration.md @@ -0,0 +1,103 @@ +# Configuring mysql-backup + +`mysql-backup` can be configured using one or more of: + +* environment variables +* CLI flags +* a configuration file + +In all cases, the command line flag option takes precedence over the environment variable which takes +precedence over the config file option. + +The environment variables, CLI flag options and config file options are similar, but not exactly the same, +due to variances in how the various are structured. As a general rule: + +* Environment variables are all uppercase, with words separated by underscores, and most start with `DB_DUMP`. For example, `DB_DUMP_FREQ=60`. +* CLI flags are all lowercase, with words separated by hyphens. Since the CLI has sub-commands, the `dump-` and `restore-` are unnecessary. For example, `mysql-backup dump --frequency=60` or `mysql-backup restore --target=/foo/file.gz`. + +For example, the following are equivalent. + +Set dump frequency to 60 minutes: + +* Environment variable: `DB_DUMP_FREQ=60` +* CLI flag: `mysql-backup dump --frequency=60` +* Config file: +```yaml +dump: + schedule: + frequency: 60 +``` + +Set the dump target to the directory `/db`: + +* Environment variable: `DB_DUMP_TARGET=/db` +* CLI flag: `mysql-backup dump --target=/db` +* Config file: +```yaml +dump: + targets: + - file + +targets: + file: + url: /db +``` + +**Security Notices** + +If using environment variables with any credentials in a container, you should consider the [use of `--env-file=`](https://docs.docker.com/engine/reference/commandline/run/#set-environment-variables-e-env-env-file), [docker secrets](https://docs.docker.com/engine/swarm/secrets/) to keep your secrets out of your shell history + +If using CLI flags with any credentials, you should consider using a config file instead of directly +placing credentials in the flags, where they may be kept in shell history. + +There is **no** default configuration file. To use a configuration file, you **must** specify it with the `--config` flag. + +## Sample Configuration Files + +Various sample configuration files are available in the [sample-configs](../sample-configs/) directory. + +## Configuration Options + +The following are the environment variables, CLI flags and configuration file options for a backup or a restore. + +| Purpose | Backup / Restore | CLI Flag | Env Var | Config Key | Default | +| --- | --- | --- | --- | --- | --- | +| hostname to connect to database. Required. | BR | `server` | `DB_SERVER` | `database.server` | | +| port to use to connect to database. Optional. 
| BR | `port` | `DB_PORT` | `database.port` | 3306 | +| username for the database | BR | `user` | `DB_USER` | `database.credentials.username` | | +| password for the database | BR | `pass` | `DB_PASS` | `database.credentials.password` | | +| names of databases to dump, comma-separated | B | `include` | `DB_NAMES` | `database.include` | all databases in the server | +| names of databases to exclude from the dump | B | `exclude` | `DB_NAMES_EXCLUDE` | `database.exclude` | | +| do not include `USE ;` statement in the dump | B | `no-database-name` | `NO_DATABASE_NAME` | `database.no-database-name` | `false` | +| restore to a specific database | R | `restore --database` | `RESTORE_DATABASE` | `restore.database` | | +| how often to do a dump, in minutes | B | `dump --frequency` | `DB_DUMP_FREQ` | `dump.schedule.frequency` | `1440` (in minutes), i.e. once per day | +| what time to do the first dump | B | `dump --begin` | `DB_DUMP_BEGIN` | `dump.schedule.begin` | `0`, i.e. immediately | +| cron schedule for dumps | B | `dump --cron` | `DB_DUMP_CRON` | `dump.schedule.cron` | | +| run the backup a single time and exit | B | `dump --once` | `RUN_ONCE` | `dump.schedule.once` | `false` | +| enable debug logging | BR | `debug` | `DEBUG` | `logging: debug` | `false` | +| where to put the dump file; see [backup](./backup.md) | B | `dump --target` | `DB_DUMP_TARGET` | `dump.targets` | | +| where the restore file exists; see [restore](./restore.md) | R | `restore --target` | `DB_RESTORE_TARGET` | `restore.target` | | +| replace any `:` in the dump filename with `-` | B | `dump --safechars` | `DB_DUMP_SAFECHARS` | `database.safechars` | `false` | +| AWS access key ID, used only if a target does not have one | BR | `aws-access-key-id` | `AWS_ACCESS_KEY_ID` | `dump.targets[s3-target].credentials.access-key-id` | | +| AWS secret access key, used only if a target does not have one | BR | `aws-secret-access-key` | `AWS_SECRET_ACCESS_KEY` | `dump.targets[s3-target].credentials.secret-access-key` | | +| AWS default region, used only if a target does not have one | BR | `aws-region` | `AWS_REGION` | `dump.targets[s3-target].region` | | +| alternative endpoint URL for S3-interoperable systems, used only if a target does not have one | BR | `aws-endpoint-url` | `AWS_ENDPOINT_URL` | `dump.targets[s3-target].endpoint` | | +| SMB username, used only if a target does not have one | BR | `smb-user` | `SMB_USER` | `dump.targets[smb-target].credentials.username` | | +| SMB password, used only if a target does not have one | BR | `smb-pass` | `SMB_PASS` | `dump.targets[smb-target].credentials.password` | | +| compression to use, one of: `bzip2`, `gzip` | B | `compression` | `COMPRESSION` | `dump.compression` | `gzip` | +| when in container, run the dump or restore with `nice`/`ionice` | BR | `` | `NICE` | `` | `false` | +| tmp directory to be used during backup creation and other operations | BR | `tmp` | `TMP_PATH` | `tmp` | system-defined | +| filename to save the target backup file | B | `dump --filename-pattern` | `DB_DUMP_FILENAME_PATTERN` | `dump.filename-pattern` | | +| directory with scripts to execute before backup | B | `dump --pre-backup-scripts` | `DB_DUMP_PRE_BACKUP_SCRIPTS` | `dump.scripts.pre-backup` | in container, `/scripts.d/pre-backup/` | +| directory with scripts to execute after backup | B | `dump --post-backup-scripts` | `DB_DUMP_POST_BACKUP_SCRIPTS` | `dump.scripts.post-backup` | in container, `/scripts.d/post-backup/` | +| directory with scripts to execute before restore | R | `restore 
--pre-restore-scripts` | `DB_DUMP_PRE_RESTORE_SCRIPTS` | `dump.pre-restore-scripts` | in container, `/scripts.d/pre-restore/` | +| directory with scripts to execute after restore | R | `restore --post-restore-scripts` | `DB_DUMP_POST_RESTORE_SCRIPTS` | `dump.post-restore-scripts` | in container, `/scripts.d/post-restore/` | + + +## Unsupported Options + +Unsupported options from the old version of `mysql-backup`: + +* `MYSQLDUMP_OPTS`: A string of options to pass to `mysqldump`, e.g. `MYSQLDUMP_OPTS="--opt abc --param def --max_allowed_packet=123455678"` will run `mysqldump --opt abc --param def --max_allowed_packet=123455678`. These are replaced by individual options. +* `AWS_CLI_OPTS`: Additional arguments to be passed to the `aws` part of the `aws s3 cp` command, click [here](https://docs.aws.amazon.com/cli/latest/reference/#options) for a list. These are replaced by target-specific options. +* `AWS_CLI_S3_CP_OPTS`: Additional arguments to be passed to the `s3 cp` part of the `aws s3 cp` command, click [here](https://docs.aws.amazon.com/cli/latest/reference/s3/cp.html#options) for a list. If you are using AWS KMS, `sse`, `sse-kms-key-id`, etc., may be of interest. These are replaced by target-specific options. diff --git a/docs/container_considerations.md new file mode 100644 index 00000000..475917b5 --- /dev/null +++ b/docs/container_considerations.md @@ -0,0 +1,31 @@ +# Container Considerations + +There are certain special considerations when running in a container. + +## Permissions + +By default, the backup/restore process does **not** run as root (UID 0). Whenever possible, you should run processes (not just in containers) as users other than root. In this case, it runs as username `appuser` with UID/GID `1005`. + +In most scenarios, this will not affect your backup process negatively. However, if you are using the "Local" dump target, i.e. your `DB_DUMP_TARGET` starts with `/` - and, most likely, is a volume mounted into the container - you can run into permissions issues. For example, if your mounted directory is owned by root on the host, then the backup process will be unable to write to it. + +In this case, you have two options: + +* Run the container as root, `docker run --user 0 ... ` or, in `docker-compose.yml`, `user: "0"` +* Ensure your mounted directory is writable as UID or GID `1005`. + +## Nice + +MySQL backups can be resource intensive. When running using the CLI, it is up to you to use +`nice`/`ionice` to control it, if you so desire. If running in a container, you can tell the +container to be "nicer" by setting `NICE=true`. + +For more information, see https://13rac1.com/articles/2013/03/forcing-mysqldump-always-be-nice-cpu-and-io/ + +## File Dump Target + +When backing up, the dump target is the location where the dump should be placed. When running in a container, +it defaults to `/backup` in the container. Of course, having the backup in the container does not help very much, so we very strongly recommend you volume mount it outside somewhere. For example: + +```bash +docker run -v /path/to/backup:/mybackup -e DB_DUMP_TARGET=/mybackup ... +``` \ No newline at end of file diff --git a/docs/contributing.md new file mode 100644 index 00000000..74f73478 --- /dev/null +++ b/docs/contributing.md @@ -0,0 +1,25 @@ +# Contributing + +## Build Process + +This GitHub repo is the source for the mysql-backup image.
The actual image is stored on the docker hub at `databack/mysql-backup`, and is triggered with each commit to the source by automated build via Webhooks. + +There are 2 builds: 1 for version based on the git tag, and another for the particular version number. + +## Tests + +The tests all run in docker containers, to avoid the need to install anything other than `make` and `docker`, and even can run over remote docker connections, avoiding any local bind-mounts. To run all tests: + +``` +make test +``` + +To run with debugging + +``` +make test DEBUG=debug +``` + +The above will generate _copious_ outputs, so you might want to redirect stdout and stderr to a file. + +This runs each of the several testing targets, each of which is a script in `test/test_*.sh`, which sets up tests, builds containers, runs the tests, and collects the output. diff --git a/docs/database_address.md b/docs/database_address.md new file mode 100644 index 00000000..943645db --- /dev/null +++ b/docs/database_address.md @@ -0,0 +1,23 @@ +# Connecting to the Database + +In order to perform the actual dump or restore, `mysql-backup` needs to connect to the database. You **must** pass the database address via configuration. For example, to set the address to `my-db-address`: + +* Environment variable: `DB_SERVER=my-db-address` +* CLI flag: `--server=my-db-address` +* Config file: +```yaml +db-server: my-db-address +``` + +The address itself, in the above example `my-db-address`, can be a container or any database process, as long as it is +accessible from where the `mysql-backup` runs. + +The default port is `3306`, the normal default port for mysql. You can override the default port of `3306` via +configuration. For example, to set the port to `3456`: + +* Environment variable: `DB_PORT=3456` +* CLI flag: `--port=3456` +* Config file: +```yaml +db-port: 3456 +``` diff --git a/docs/format.md b/docs/format.md new file mode 100644 index 00000000..ca31ef05 --- /dev/null +++ b/docs/format.md @@ -0,0 +1,15 @@ +The dump file is a tar.gz with one file per database. + +The backup file _always_ dumps at the database server level, i.e. it will +call `USE DATABASE ` for each database to be backed up, +and will include `CREATE DATABASE` and `USE DATABASE` in the backup file. + +This is equivalent to passing [`--databases `](https://dev.mysql.com/doc/refman/8.0/en/mysqldump.html#option_mysqldump_databases) to `mysqldump`: + +> With this option, it treats all name arguments as database names. CREATE DATABASE and USE statements are included in the output before each new database. + +Or [`--all-databases`](https://dev.mysql.com/doc/refman/8.0/en/mysqldump.html#option_mysqldump_all-databases): + +> This is the same as using the --databases option and naming all the databases on the command line. + + diff --git a/docs/restore.md b/docs/restore.md new file mode 100644 index 00000000..6149fdfa --- /dev/null +++ b/docs/restore.md @@ -0,0 +1,157 @@ +# Restoring + +Restoring uses the same database, SMB and S3 configuration options as [backup](./backup.md). + +Like dump, you point it at a target, which is a location for backups, select a backup file, +and it will restore the database from that file in that target. +The primary difference is the use of restore target, instead of a dump target. This follows the same syntax as +the dump target, but instead of a dump _directory_, it is the actual restore _file_, which should be a +compressed dump file. 
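+
+To give the overall shape up front (the individual pieces are described below), a typical restore invocation might look like the following sketch; the host, credentials, target path and filename are placeholders:
+
+```bash
+# restore the named dump file from the local directory /backup
+mysql-backup restore \
+  --server=my-db-host \
+  --user=user123 \
+  --pass=pass123 \
+  --target=/backup/ \
+  db_backup_201509271627.gz
+```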
+ +In order to restore, you need the following: + +* A storage target - directory, SMB or S3 - to restore from +* A dump file in the storage target, which can come from any of your targets or a local file (which itself is a target) +* A database to restore to, along with access credentials +* Optionally, pre- and post-restore processing scripts + +## Configuring restore + +`restore` **always** must have one argument, the name of the file in the target from which to restore. E.g. + +```bash +$ restore db_backup_201509271627.gz +``` + +You can provide the target via environment variables, CLI or the config file. + +### Environment variables and CLI + +From a local file: + +* Environment variable: `DB_RESTORE_TARGET=/backup/ restore db_backup_201509271627.gz` +* Command line: `restore --target=/backup/ db_backup_201509271627.gz` + +From S3: + +* Environment variable: `DB_RESTORE_TARGET=s3://mybucket/ restore db_backup_201509271627.gz` +* Command line: `restore --target=s3://mybucket/ db_backup_201509271627.gz` + +From SMB: + +* Environment variable: `DB_RESTORE_TARGET=smb://myserver/myshare/ restore db_backup_201509271627.gz` +* Command line: `restore --target=smb://myserver/myshare/ restore db_backup_201509271627.gz` + +The credentials are provided using the same CLI flags and/or environment variables as described in [backup](./docs/backup.md). + +### Config file + +A config file may already contain much useful information: + +* targets and their credentials +* database connectivity information and credentials +* pre- and post-restore processing scripts + +In order to restore from a config file, you provide a `--target` that references one of the existing targets. The URL +begins with `config://` as the scheme, followed by the name of the target. For example, if you have a target named +`mybucket`, then you can restore to it with: + +```bash +$ mysql-backup restore --target=config://mybucket/ db_backup_201509271627.gz +``` + +Since the target is `config://`, it will use the configuration information for that target from the config file. +It references the target named `mybucket`, including the provided configuration and credentials. Within that target, +it then retrieves the file named `db_backup_201509271627.gz` and restores it. + +As you did not specify a database, it will use the database information from the config file as well. + +### Restore when using docker-compose + +`docker-compose` automagically creates a network when started. `docker run` simply attaches to the bridge network. If you are trying to communicate with a mysql container started by docker-compose, you'll need to specify the network in your command arguments. You can use `docker network ls` to see what network is being used, or you can declare a network in your docker-compose.yml. + +#### Example: + +`docker run -e DB_SERVER=gotodb.example.com -e DB_USER=user123 -e DB_PASS=pass123 -e DB_RESTORE_TARGET=/backup/ -v /local/path:/backup --network="skynet" databack/mysql-backup restore db_backup_201509271627.gz` + +### Using docker secrets + +Environment variables used in this image can be passed in files as well. This is useful when you are using docker secrets for storing sensitive information. + +As you can set environment variable with `-e ENVIRONMENT_VARIABLE=value`, you can also use `-e ENVIRONMENT_VARIABLE_FILE=/path/to/file`. Contents of that file will be assigned to the environment variable. 
+ +**Example:** + +```bash +$ docker run -d \ + -e DB_HOST_FILE=/run/secrets/DB_HOST \ + -e DB_USER_FILE=/run/secrets/DB_USER \ + -e DB_PASS_FILE=/run/secrets/DB_PASS \ + -v /local/file/path:/db \ + databack/mysql-backup +``` + +### Restore pre and post processing + +As with backups pre and post processing, you have pre- and post-restore processing. + +This is useful if you need to restore a backup file that includes some files along with the database dump. +For example, to restore a _WordPress_ install, you would uncompress a tarball containing +the db backup and a second tarball with the contents of a WordPress install on +`pre-restore`. Then on `post-restore`, uncompress the WordPress files on the container's web server root directory. + +In order to perform pre-restore processing, set the pre-restore processing directory, and `mysql-backup` +will execute any file that ends in `.sh`. For example: + +* Environment variable: `DB_DUMP_PRE_RESTORE_SCRIPTS=/scripts.d/pre-restore` +* Command line: `restore --pre-restore-scripts=/scripts.d/pre-restore` +* Config file: +```yaml +restore: + scripts: + pre-restore: /scripts.d/pre-restore +``` + +When running in a container, these are set automatically to `/scripts.d/pre-restore` and `/scripts.d/post-restore` +respectively. + +For an example take a look at the post-backup examples, all variables defined for post-backup scripts are available for pre-processing too. Also don't forget to add the same host volumes for `pre-restore` and `post-restore` directories as described for post-backup processing. + +### Restoring to a different database + +The dump files normally contain a `CREATE DATABASE ` statement, to create the database if it +does not exist, followed by a `USE ;` statement, which tells MySQL which database to continue the restore into. + +Sometimes, you wish to restore a dump file to a different database. +For example, you dumped a database named `FOO`, and wish to restore it to a database named `BAR`. +The dump file will have: + +```sql +CREATE DATABASE `FOO`; +USE `FOO`; +``` + +`mysql-backup` can be instructed to restore `FOO` into `BAR` instead, as well as ensuring `BAR` exists. +Use the `--database` option to to provide a mapping of `FROM` to `TO` database names. + +Continuing our example, to restore a dump file that has `USE FOO;` in it, + +* Environment variable: `DB_RESTORE_DATABASE=FOO:BAR` +* Command line: `restore --database=FOO:BAR` + +You can have multiple mappings by separating them with commas. For example: + +* Environment variable: `DB_RESTORE_DATABASE=FOO:BAR,BAZ:QUX` +* Command line: `restore --database=FOO:BAR,BAZ:QUX` + +Database names are case-insensitive, as they are in mysql. + +There is no config file support for mappings. + +When the restore runs, it will do the following: + +1. If the dump file has `USE ;` in it, it will be replaced with `USE ;` where `` is the `TO` database name. +1. Run the restore, which will restore into the `TO` database name. + +If the dump file does *not* have the `USE ;` statement in it, for example, if it was created with +`mysql-backup dump --no-database-name`, then it simply restores as is. Be careful with this. diff --git a/docs/scheduling.md b/docs/scheduling.md new file mode 100644 index 00000000..ed4f69f5 --- /dev/null +++ b/docs/scheduling.md @@ -0,0 +1,85 @@ +# Backup Scheduling + +`mysql-backup` can be run either once, doing a backup and exiting, or as a long-running task, +backing up on schedule. 
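+
+As a quick sketch of the two modes, with placeholder connection details (the individual scheduling options are described below):
+
+```bash
+# run a single backup immediately, then exit
+mysql-backup dump --server=my-db-host --target=/db --once
+
+# keep running, backing up every 60 minutes, starting 10 minutes after container start
+mysql-backup dump --server=my-db-host --target=/db --frequency=60 --begin=+10
+```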
+ +There are several options for scheduling how often a backup should run: + +* run just once and exit. +* run every x minutes, optionally delaying the first one by a certain amount of time +* run on a schedule. + + +## Order of Priority + +The scheduling options have an order of priority: + +1. If run once is set, it will run immediately and exit, ignoring all other scheduling options. +2. If cron is set, it runs according to the cron schedule, ignoring frequency and delayed start. +3. Frequency and optionally delayed start are used. + +## Scheduling Options + +### Run once + +You can set it to run just once via: + +* Environment variable: `RUN_ONCE=true` +* CLI flag: `dump --run-once` +* Config file: +```yaml +dump: + run-once: true +``` + +If you set it to run just once, the backup will run once and then exit. + +**This overrides all other scheduling options**. + +This is useful for one-offs, or if `mysql-backup` is being run via an external scheduler, such as cron +or [kubernetes cron jobs](https://kubernetes.io/docs/concepts/workloads/controllers/cron-jobs/), and thus +don't want `mysql-backup` to do the scheduling internally. + +### Cron Scheduling + +You can set a cron schedule via: + +* Environment variable: `CRON_SCHEDULE=0 * * * *` +* CLI flag: `dump --cron="0 * * * *"` +* Config file: +```yaml +dump: + cron: 0 * * * * +``` + +The cron dump schedule option uses standard [crontab syntax](https://en.wikipedia.org/wiki/Cron), a +single line. + +If a cron-scheduled backup takes longer than the beginning of the next backup window, it will be skipped. For example, if your cron line is scheduled to backup every hour, and the backup that runs at 13:00 finishes at 14:05, the next backup will not be immediate, but rather at 15:00. + +### Frequency and Delayed Start + +If neither run once nor cron is set, then `mysql-backup` will use the frequency and optional delayed start options. + +The value for each is minutes. Thus, you can set backup to run every hour by setting the frequency to `60`. +Similarly, you can delay start by 2 hours by setting the delayed start to `120`. + +You can set the frequency start via: + +* Environment variable: `DB_DUMP_FREQ=60` +* CLI flag: `dump --frequency=60` +* Config file: +```yaml +dump: + frequency: 60 +``` + +You can set the delayed start via: + +* Environment variable: `DB_DUMP_DELAY=120` +* CLI flag: `dump --delay=120` +* Config file: +```yaml +dump: + delay: 120 +``` diff --git a/entrypoint b/entrypoint index a0fa70b4..2bce6c56 100755 --- a/entrypoint +++ b/entrypoint @@ -1,247 +1,3 @@ -#!/bin/bash +#!/bin/sh -. /functions.sh - -if [[ -n "$DB_DUMP_DEBUG" ]]; then - set -x -fi - -# get all variables from environment variables or files (e.g. 
VARIABLE_NAME_FILE) -# (setting defaults happens here, too) -file_env "DB_SERVER" -file_env "DB_PORT" -file_env "DB_USER" -file_env "DB_PASS" -file_env "DB_NAMES" -file_env "DB_NAMES_EXCLUDE" - -file_env "DB_DUMP_FREQ" "1440" -file_env "DB_DUMP_BEGIN" "+0" -file_env "DB_DUMP_DEBUG" -file_env "DB_DUMP_TARGET" "/backup" -file_env "DB_DUMP_BY_SCHEMA" -file_env "DB_DUMP_KEEP_PERMISSIONS" "true" - -file_env "DB_RESTORE_TARGET" - -file_env "AWS_ENDPOINT_URL" -file_env "AWS_ENDPOINT_OPT" -file_env "AWS_CLI_OPTS" -file_env "AWS_CLI_S3_CP_OPTS" -file_env "AWS_ACCESS_KEY_ID" -file_env "AWS_SECRET_ACCESS_KEY" -file_env "AWS_DEFAULT_REGION" - -file_env "SMB_USER" -file_env "SMB_PASS" - -file_env "TMP_PATH" "/tmp" - -file_env "COMPRESSION" "gzip" - -if [[ -n "$DB_DUMP_DEBUG" ]]; then - set -x -fi - -# ensure it is defined -MYSQLDUMP_OPTS=${MYSQLDUMP_OPTS:-} - -# login credentials -if [ -n "${DB_USER}" ]; then - DBUSER="-u${DB_USER}" -else - DBUSER= -fi -if [ -n "${DB_PASS}" ]; then - DBPASS="-p${DB_PASS}" -else - DBPASS= -fi - -# database server -if [ -z "${DB_SERVER}" ]; then - echo "DB_SERVER variable is required. Exiting." - exit 1 -fi -# database port -if [ -z "${DB_PORT}" ]; then - echo "DB_PORT not provided, defaulting to 3306" - DB_PORT=3306 -fi - -# -# set compress and decompress commands -COMPRESS= -UNCOMPRESS= -case $COMPRESSION in - gzip) - COMPRESS="gzip" - UNCOMPRESS="gunzip" - EXTENSION="tgz" - ;; - bzip2) - COMPRESS="bzip2" - UNCOMPRESS="bzip2 -d" - EXTENSION="tbz2" - ;; - *) - echo "Unknown compression requested: $COMPRESSION" >&2 - exit 1 -esac - - -# temporary dump dir -TMPDIR="${TMP_PATH}/backups" -TMPRESTORE="${TMP_PATH}/restorefile" - -# this is global, so has to be set outside -declare -A uri - - - -if [[ -n "$DB_RESTORE_TARGET" ]]; then - # Execute additional scripts for pre backup restore processing. For example, - # uncompress a tarball that contains the tarballs for the sql dump and a - # wordpress installation. - if [ -d /scripts.d/pre-restore/ ]; then - for i in $(ls /scripts.d/pre-restore/*.sh); do - if [ -x $i ]; then - DB_RESTORE_TARGET=${DB_RESTORE_TARGET} DB_DUMP_DEBUG=${DB_DUMP_DEBUG} $i - fi - done - fi - uri_parser ${DB_RESTORE_TARGET} - if [[ "${uri[schema]}" == "file" ]]; then - cp $DB_RESTORE_TARGET $TMPRESTORE 2>/dev/null - elif [[ "${uri[schema]}" == "s3" ]]; then - [[ -n "$AWS_ENDPOINT_URL" ]] && AWS_ENDPOINT_OPT="--endpoint-url $AWS_ENDPOINT_URL" - aws ${AWS_CLI_OPTS} ${AWS_ENDPOINT_OPT} s3 cp ${AWS_CLI_S3_CP_OPTS} "${DB_RESTORE_TARGET}" $TMPRESTORE - elif [[ "${uri[schema]}" == "smb" ]]; then - if [[ -n "$SMB_USER" ]]; then - UPASSARG="-U" - UPASS="${SMB_USER}%${SMB_PASS}" - elif [[ -n "${uri[user]}" ]]; then - UPASSARG="-U" - UPASS="${uri[user]}%${uri[password]}" - else - UPASSARG= - UPASS= - fi - if [[ -n "${uri[userdomain]}" ]]; then - UDOM="-W ${uri[userdomain]}" - else - UDOM= - fi - smbclient -N "//${uri[host]}/${uri[share]}" ${UPASSARG} "${UPASS}" ${UDOM} -c "get ${uri[sharepath]} ${TMPRESTORE}" - fi - # did we get a file? - if [[ -f "$TMPRESTORE" ]]; then - if [ "$SINGLE_DATABASE" = "true" ]; then - DBDATABASE="-D$DB_NAMES" - else - DBDATABASE= - fi - workdir="${TMP_PATH}/restore.$$" - rm -rf $workdir - mkdir -p $workdir - $UNCOMPRESS < $TMPRESTORE | tar -C $workdir -xvf - - RESTORE_OPTS=${RESTORE_OPTS:-} - # If there are multiple schemas in the archive (e.g. 
DB_DUMP_BY_SCHEMA was used) and DB_NAMES is set, - # restore only the required databases - if [ "$SINGLE_DATABASE" != "true" ] && [[ $(ls -1q $workdir/* | wc -l) -gt 1 ]] && [[ -n "$DB_NAMES" ]]; then - for onedb in $DB_NAMES; do - echo "Restoring $onedb from " $workdir/$onedb* - # /!\ If a schema has a name that begins with another one, it will executed multiple times the other one - cat $workdir/$onedb* | mysql $RESTORE_OPTS -h $DB_SERVER -P $DB_PORT $DBUSER $DBPASS - done - else - cat $workdir/* | mysql $RESTORE_OPTS -h $DB_SERVER -P $DB_PORT $DBUSER $DBPASS $DBDATABASE - fi - rm -rf $workdir - /bin/rm -f $TMPRESTORE - else - echo "Could not find restore file $DB_RESTORE_TARGET" - exit 1 - fi - # Execute additional scripts for post backup restore processing. For example, - # uncompress a tarball that contains the files of a wordpress installation - if [ -d /scripts.d/post-restore/ ]; then - for i in $(ls /scripts.d/post-restore/*.sh); do - if [ -x $i ]; then - DB_RESTORE_TARGET=${DB_RESTORE_TARGET} DB_DUMP_DEBUG=${DB_DUMP_DEBUG} $i - fi - done - fi -else - # wait for the next time to start a backup - # for debugging - echo Starting at $(date) - last_run=0 - current_time=$(date +"%s") - freq_time=$(($DB_DUMP_FREQ*60)) - # get the begin time on our date - # REMEMBER: we are using the basic date package in alpine - # could be a delay in minutes or an absolute time of day - if [ -n "$DB_DUMP_CRON" ]; then - # calculate how long until the next cron instance is met - waittime=$(wait_for_cron "$DB_DUMP_CRON" "$current_time" $last_run) - elif [[ $DB_DUMP_BEGIN =~ ^\+(.*)$ ]]; then - waittime=$(( ${BASH_REMATCH[1]} * 60 )) - target_time=$(($current_time + $waittime)) - else - today=$(date +"%Y-%m-%d") - target_time=$(date --date="${today} ${DB_DUMP_BEGIN}" +"%s") - - if [[ "$target_time" < "$current_time" ]]; then - target_time=$(($target_time + 24*60*60)) - fi - - waittime=$(($target_time - $current_time)) - fi - - # If RUN_ONCE is set, don't wait - if [ -z "${RUN_ONCE}" ]; then - sleep $waittime - last_run=$(date +"%s") - fi - - # enter the loop - exit_code=0 - while true; do - # make sure the directory exists - mkdir -p $TMPDIR - do_dump - [ $? -ne 0 ] && exit_code=1 - # we can have multiple targets - for target in ${DB_DUMP_TARGET}; do - backup_target ${target} - [ $? -ne 0 ] && exit_code=1 - done - # remove lingering file - /bin/rm ${TMPDIR}/${SOURCE} - - # wait, unless RUN_ONCE is set - current_time=$(date +"%s") - if [ -n "${RUN_ONCE}" ]; then - exit $exit_code - elif [ -n "${DB_DUMP_CRON}" ]; then - waittime=$(wait_for_cron "${DB_DUMP_CRON}" "$current_time" $last_run) - else - current_time=$(date +"%s") - # Calculate how long the previous backup took - backup_time=$(($current_time - $target_time)) - # Calculate how many times the frequency time was passed during the previous backup. - freq_time_count=$(($backup_time / $freq_time)) - # Increment the count with one because we want to wait at least the frequency time once. 
- freq_time_count_to_add=$(($freq_time_count + 1)) - # Calculate the extra time to add to the previous target time - extra_time=$(($freq_time_count_to_add*$freq_time)) - # Calculate the new target time needed for the next calculation - target_time=$(($target_time + $extra_time)) - # Calculate the wait time - waittime=$(($target_time - $current_time)) - fi - sleep $waittime - last_run=$(date +"%s") - done -fi +/mysql-backup $@ diff --git a/entrypoint_orig b/entrypoint_orig new file mode 100755 index 00000000..2a5852f6 --- /dev/null +++ b/entrypoint_orig @@ -0,0 +1,195 @@ +#!/bin/bash + +. /functions.sh + +# ensure it is defined +MYSQLDUMP_OPTS=${MYSQLDUMP_OPTS:-} + +# login credentials +if [ -n "${DB_USER}" ]; then + DBUSER="-u${DB_USER}" +else + DBUSER= +fi +if [ -n "${DB_PASS}" ]; then + DBPASS="-p${DB_PASS}" +else + DBPASS= +fi + +# database server +if [ -z "${DB_SERVER}" ]; then + echo "DB_SERVER variable is required. Exiting." + exit 1 +fi +# database port +if [ -z "${DB_PORT}" ]; then + echo "DB_PORT not provided, defaulting to 3306" + DB_PORT=3306 +fi + +# +# set compress and decompress commands +COMPRESS= +UNCOMPRESS= +case $COMPRESSION in + gzip) + COMPRESS="gzip" + UNCOMPRESS="gunzip" + EXTENSION="tgz" + ;; + bzip2) + COMPRESS="bzip2" + UNCOMPRESS="bzip2 -d" + EXTENSION="tbz2" + ;; + *) + echo "Unknown compression requested: $COMPRESSION" >&2 + exit 1 +esac + + +# temporary dump dir +TMPDIR="${TMP_PATH}/backups" +TMPRESTORE="${TMP_PATH}/restorefile" + +# this is global, so has to be set outside +declare -A uri + + + +if [[ -n "$DB_RESTORE_TARGET" ]]; then + # Execute additional scripts for pre backup restore processing. For example, + # uncompress a tarball that contains the tarballs for the sql dump and a + # wordpress installation. + if [ -d /scripts.d/pre-restore/ ]; then + for i in $(ls /scripts.d/pre-restore/*.sh); do + if [ -x $i ]; then + DB_RESTORE_TARGET=${DB_RESTORE_TARGET} DB_DUMP_DEBUG=${DB_DUMP_DEBUG} $i + fi + done + fi + uri_parser ${DB_RESTORE_TARGET} + if [[ "${uri[schema]}" == "file" ]]; then + cp $DB_RESTORE_TARGET $TMPRESTORE 2>/dev/null + elif [[ "${uri[schema]}" == "s3" ]]; then + [[ -n "$AWS_ENDPOINT_URL" ]] && AWS_ENDPOINT_OPT="--endpoint-url $AWS_ENDPOINT_URL" + aws ${AWS_CLI_OPTS} ${AWS_ENDPOINT_OPT} s3 cp ${AWS_CLI_S3_CP_OPTS} "${DB_RESTORE_TARGET}" $TMPRESTORE + elif [[ "${uri[schema]}" == "smb" ]]; then + if [[ -n "$SMB_USER" ]]; then + UPASSARG="-U" + UPASS="${SMB_USER}%${SMB_PASS}" + elif [[ -n "${uri[user]}" ]]; then + UPASSARG="-U" + UPASS="${uri[user]}%${uri[password]}" + else + UPASSARG= + UPASS= + fi + if [[ -n "${uri[userdomain]}" ]]; then + UDOM="-W ${uri[userdomain]}" + else + UDOM= + fi + smbclient -N "//${uri[host]}/${uri[share]}" ${UPASSARG} "${UPASS}" ${UDOM} -c "get ${uri[sharepath]} ${TMPRESTORE}" + fi + # did we get a file? + if [[ -f "$TMPRESTORE" ]]; then + if [ "$SINGLE_DATABASE" = "true" ]; then + DBDATABASE="-D$DB_NAMES" + else + DBDATABASE= + fi + workdir="${TMP_PATH}/restore.$$" + rm -rf $workdir + mkdir -p $workdir + $UNCOMPRESS < $TMPRESTORE | tar -C $workdir -xvf - + cat $workdir/* | mysql -h $DB_SERVER -P $DB_PORT $DBUSER $DBPASS $DBDATABASE + rm -rf $workdir + /bin/rm -f $TMPRESTORE + else + echo "Could not find restore file $DB_RESTORE_TARGET" + exit 1 + fi + # Execute additional scripts for post backup restore processing. 
For example, + # uncompress a tarball that contains the files of a wordpress installation + if [ -d /scripts.d/post-restore/ ]; then + for i in $(ls /scripts.d/post-restore/*.sh); do + if [ -x $i ]; then + DB_RESTORE_TARGET=${DB_RESTORE_TARGET} DB_DUMP_DEBUG=${DB_DUMP_DEBUG} $i + fi + done + fi +else + # wait for the next time to start a backup + # for debugging + echo Starting at $(date) + last_run=0 + current_time=$(date +"%s") + freq_time=$(($DB_DUMP_FREQ*60)) + # get the begin time on our date + # REMEMBER: we are using the basic date package in alpine + # could be a delay in minutes or an absolute time of day + if [ -n "$DB_DUMP_CRON" ]; then + # calculate how long until the next cron instance is met + waittime=$(wait_for_cron "$DB_DUMP_CRON" "$current_time" $last_run) + elif [[ $DB_DUMP_BEGIN =~ ^\+(.*)$ ]]; then + waittime=$(( ${BASH_REMATCH[1]} * 60 )) + target_time=$(($current_time + $waittime)) + else + today=$(date +"%Y-%m-%d") + target_time=$(date --date="${today} ${DB_DUMP_BEGIN}" +"%s") + + if [[ "$target_time" < "$current_time" ]]; then + target_time=$(($target_time + 24*60*60)) + fi + + waittime=$(($target_time - $current_time)) + fi + + # If RUN_ONCE is set, don't wait + if [ -z "${RUN_ONCE}" ]; then + sleep $waittime + last_run=$(date +"%s") + fi + + # enter the loop + exit_code=0 + while true; do + # make sure the directory exists + mkdir -p $TMPDIR + do_dump + [ $? -ne 0 ] && exit_code=1 + # we can have multiple targets + for target in ${DB_DUMP_TARGET}; do + backup_target ${target} + [ $? -ne 0 ] && exit_code=1 + done + # remove lingering file + /bin/rm ${TMPDIR}/${SOURCE} + + # wait, unless RUN_ONCE is set + current_time=$(date +"%s") + if [ -n "${RUN_ONCE}" ]; then + exit $exit_code + elif [ -n "${DB_DUMP_CRON}" ]; then + waittime=$(wait_for_cron "${DB_DUMP_CRON}" "$current_time" $last_run) + else + current_time=$(date +"%s") + # Calculate how long the previous backup took + backup_time=$(($current_time - $target_time)) + # Calculate how many times the frequency time was passed during the previous backup. + freq_time_count=$(($backup_time / $freq_time)) + # Increment the count with one because we want to wait at least the frequency time once. 
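+            # (Illustration with assumed numbers: freq_time=3600 and backup_time=5000 give
+            # freq_time_count=1, so two whole intervals are added to target_time and the
+            # next run lands on the following interval boundary rather than immediately.)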
+ freq_time_count_to_add=$(($freq_time_count + 1)) + # Calculate the extra time to add to the previous target time + extra_time=$(($freq_time_count_to_add*$freq_time)) + # Calculate the new target time needed for the next calculation + target_time=$(($target_time + $extra_time)) + # Calculate the wait time + waittime=$(($target_time - $current_time)) + fi + sleep $waittime + last_run=$(date +"%s") + done +fi diff --git a/go.mod b/go.mod new file mode 100644 index 00000000..a43bacf3 --- /dev/null +++ b/go.mod @@ -0,0 +1,86 @@ +module github.com/databacker/mysql-backup + +go 1.19 + +require ( + github.com/aws/aws-sdk-go-v2 v1.19.1 + github.com/aws/aws-sdk-go-v2/config v1.18.30 + github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.11.74 + github.com/aws/aws-sdk-go-v2/service/s3 v1.37.1 + github.com/docker/docker v23.0.6+incompatible + github.com/docker/go-connections v0.4.0 + github.com/go-sql-driver/mysql v1.7.1 + github.com/johannesboyne/gofakes3 v0.0.0-20230506070712-04da935ef877 + github.com/moby/moby v23.0.6+incompatible + github.com/robfig/cron/v3 v3.0.1 + github.com/sirupsen/logrus v1.9.0 + github.com/spf13/cobra v1.0.0 + github.com/spf13/pflag v1.0.5 + github.com/spf13/viper v1.6.3 + github.com/stretchr/testify v1.8.4 + gopkg.in/yaml.v3 v3.0.1 +) + +require ( + github.com/cloudsoda/go-smb2 v0.0.0-20231106205947-b0758ecc4c67 + github.com/go-test/deep v1.1.0 +) + +require ( + github.com/Microsoft/go-winio v0.6.1 // indirect + github.com/aws/aws-sdk-go v1.44.256 // indirect + github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.4.10 // indirect + github.com/aws/aws-sdk-go-v2/credentials v1.13.29 // indirect + github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.13.6 // indirect + github.com/aws/aws-sdk-go-v2/internal/configsources v1.1.36 // indirect + github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.4.30 // indirect + github.com/aws/aws-sdk-go-v2/internal/ini v1.3.37 // indirect + github.com/aws/aws-sdk-go-v2/internal/v4a v1.0.28 // indirect + github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.9.11 // indirect + github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.1.31 // indirect + github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.9.30 // indirect + github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.14.5 // indirect + github.com/aws/aws-sdk-go-v2/service/sso v1.12.14 // indirect + github.com/aws/aws-sdk-go-v2/service/ssooidc v1.14.14 // indirect + github.com/aws/aws-sdk-go-v2/service/sts v1.20.1 // indirect + github.com/aws/smithy-go v1.13.5 // indirect + github.com/containerd/containerd v1.7.1 // indirect + github.com/davecgh/go-spew v1.1.1 // indirect + github.com/docker/distribution v2.8.1+incompatible // indirect + github.com/docker/go-units v0.5.0 // indirect + github.com/fsnotify/fsnotify v1.6.0 // indirect + github.com/geoffgarside/ber v1.1.0 // indirect + github.com/gogo/protobuf v1.3.2 // indirect + github.com/hashicorp/hcl v1.0.0 // indirect + github.com/inconshreveable/mousetrap v1.0.0 // indirect + github.com/jmespath/go-jmespath v0.4.0 // indirect + github.com/klauspost/compress v1.16.5 // indirect + github.com/magiconair/properties v1.8.1 // indirect + github.com/mitchellh/mapstructure v1.1.2 // indirect + github.com/moby/patternmatcher v0.5.0 // indirect + github.com/moby/sys/sequential v0.5.0 // indirect + github.com/moby/term v0.5.0 // indirect + github.com/morikuni/aec v1.0.0 // indirect + github.com/opencontainers/go-digest v1.0.0 // indirect + github.com/opencontainers/image-spec v1.1.0-rc2.0.20221005185240-3a7f492d3f1b // 
indirect + github.com/opencontainers/runc v1.1.7 // indirect + github.com/pelletier/go-toml v1.9.5 // indirect + github.com/pkg/errors v0.9.1 // indirect + github.com/pmezard/go-difflib v1.0.0 // indirect + github.com/ryszard/goskiplist v0.0.0-20150312221310-2dfbae5fcf46 // indirect + github.com/shabbyrobe/gocovmerge v0.0.0-20190829150210-3e036491d500 // indirect + github.com/spf13/afero v1.2.2 // indirect + github.com/spf13/cast v1.3.0 // indirect + github.com/spf13/jwalterweatherman v1.0.0 // indirect + github.com/stretchr/objx v0.5.0 // indirect + github.com/subosito/gotenv v1.2.0 // indirect + golang.org/x/crypto v0.9.0 // indirect + golang.org/x/mod v0.10.0 // indirect + golang.org/x/net v0.10.0 // indirect + golang.org/x/sys v0.8.0 // indirect + golang.org/x/text v0.9.0 // indirect + golang.org/x/tools v0.8.0 // indirect + gopkg.in/ini.v1 v1.51.0 // indirect + gopkg.in/yaml.v2 v2.4.0 // indirect + gotest.tools/v3 v3.4.0 // indirect +) diff --git a/go.sum b/go.sum new file mode 100644 index 00000000..50f53f1a --- /dev/null +++ b/go.sum @@ -0,0 +1,365 @@ +cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= +github.com/Azure/go-ansiterm v0.0.0-20210617225240-d185dfc1b5a1 h1:UQHMgLO+TxOElx5B5HZ4hJQsoJ/PvUvKRhJHDQXO8P8= +github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= +github.com/Microsoft/go-winio v0.6.1 h1:9/kr64B9VUZrLm5YYwbGtUJnMgqWVOdUAXu6Migciow= +github.com/Microsoft/go-winio v0.6.1/go.mod h1:LRdKpFKfdobln8UmuiYcKPot9D2v6svN5+sAH+4kjUM= +github.com/Microsoft/hcsshim v0.10.0-rc.8 h1:YSZVvlIIDD1UxQpJp0h+dnpLUw+TrY0cx8obKsp3bek= +github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU= +github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= +github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= +github.com/armon/consul-api v0.0.0-20180202201655-eb2c6b5be1b6/go.mod h1:grANhF5doyWs3UAsr3K4I6qtAmlQcZDesFNEHPZAzj8= +github.com/aws/aws-sdk-go v1.44.256 h1:O8VH+bJqgLDguqkH/xQBFz5o/YheeZqgcOYIgsTVWY4= +github.com/aws/aws-sdk-go v1.44.256/go.mod h1:aVsgQcEevwlmQ7qHE9I3h+dtQgpqhFB+i8Phjh7fkwI= +github.com/aws/aws-sdk-go-v2 v1.19.1 h1:STs0lbbpXu3byTPcnRLghs2DH0yk9qKDo27TyyJSKsM= +github.com/aws/aws-sdk-go-v2 v1.19.1/go.mod h1:uzbQtefpm44goOPmdKyAlXSNcwlRgF3ePWVW6EtJvvw= +github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.4.10 h1:dK82zF6kkPeCo8J1e+tGx4JdvDIQzj7ygIoLg8WMuGs= +github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.4.10/go.mod h1:VeTZetY5KRJLuD/7fkQXMU6Mw7H5m/KP2J5Iy9osMno= +github.com/aws/aws-sdk-go-v2/config v1.18.30 h1:TTAXQIn31qYFUQjkW6siVrRTX1ux+sADZDOe3jsZcMg= +github.com/aws/aws-sdk-go-v2/config v1.18.30/go.mod h1:+YogjT7e/t9JVu/sOnZZgxTge1G+bPNk8zOaI0QIQvE= +github.com/aws/aws-sdk-go-v2/credentials v1.13.29 h1:KNgCpThGuZyCjq9EuuqoLDenKKMwO/x1Xx01ckDa7VI= +github.com/aws/aws-sdk-go-v2/credentials v1.13.29/go.mod h1:VMq1LcmSEa9qxBlOCYTjVuGJWEEzhGmgL552jQsmhss= +github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.13.6 h1:kortK122LvTU34CGX/F9oJpelXKkEA2j/MW48II+8+8= +github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.13.6/go.mod h1:k7IPHyHNIASI0m0RwOmCjWOTtgG+J0raqwuHH8WhWJE= +github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.11.74 h1:5iIuHdeN3/x3kFBENHgYQl1ZtD+ZhLBXy6IgXflUtSI= +github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.11.74/go.mod h1:kBEg7nSM1Dg9tsHX5eoFeJMmO+njnFOwxP0dPuQCEGc= +github.com/aws/aws-sdk-go-v2/internal/configsources 
v1.1.36 h1:kbk81RlPoC6e4co7cQx2FAvH9TgbzxIqCqiosAFiB+w= +github.com/aws/aws-sdk-go-v2/internal/configsources v1.1.36/go.mod h1:T8Jsn/uNL/AFOXrVYQ1YQaN1r9gN34JU1855/Lyjv+o= +github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.4.30 h1:lMl8S5SB8jNCB+Sty2Em4lnu3IJytceHQd7qbmfqKL0= +github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.4.30/go.mod h1:v3GSCnFxbHzt9dlWBqvA1K1f9lmWuf4ztupZBCAIVs4= +github.com/aws/aws-sdk-go-v2/internal/ini v1.3.37 h1:BXiqvN7WuV/pMhz8CivhO8cG8icJcjnjHumif4ukQ0c= +github.com/aws/aws-sdk-go-v2/internal/ini v1.3.37/go.mod h1:d4GZ62cjnz/hjKFdAu11gAwK73bdhqaFv2O4J1gaqIs= +github.com/aws/aws-sdk-go-v2/internal/v4a v1.0.28 h1:mGA+qm0tiLaZ04PfQtxthU3XTZ1sN44YlqVjd+1E+Pk= +github.com/aws/aws-sdk-go-v2/internal/v4a v1.0.28/go.mod h1:KkWH+0gAmvloVXaVjdY6/LLwQV6TjYOZ1j5JdVm+XBc= +github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.9.11 h1:y2+VQzC6Zh2ojtV2LoC0MNwHWc6qXv/j2vrQtlftkdA= +github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.9.11/go.mod h1:iV4q2hsqtNECrfmlXyord9u4zyuFEJX9eLgLpSPzWA8= +github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.1.31 h1:TGjmYwqqE6dMDSUSyQNct4MyTAgz95bPnDAjBOEgwOI= +github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.1.31/go.mod h1:HvfQ61vGBanxBijrBIpyG32mS9w6fsPZa+BwtV1uQUY= +github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.9.30 h1:UcVZxLVNY4yayCmiG94Ge3l2qbc5WEB/oa4RmjoQEi0= +github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.9.30/go.mod h1:wPffyJiWWtHwvpFyn23WjAjVjMnlQOQrl02+vutBh3Y= +github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.14.5 h1:B90htdoSv7OMH6QzzZ9cuZUoXVwFml0fTCDOpcGakCw= +github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.14.5/go.mod h1:fdxqVm1S6xQa6obwHysh1GPowmyqO2pQuaRPWdyG2iQ= +github.com/aws/aws-sdk-go-v2/service/s3 v1.37.1 h1:OoFnDN7ZixctMX/Do4DgQXFvjtzQynz0p0ErQrOCeAs= +github.com/aws/aws-sdk-go-v2/service/s3 v1.37.1/go.mod h1:fBgi8xY80Fv2EveXOoTM008OhKdjrxxtVH0w0h0ozYU= +github.com/aws/aws-sdk-go-v2/service/sso v1.12.14 h1:gUjz7trfz9qBm0AlkKTvJHBXELi1wvw+2LA9GfD2AsM= +github.com/aws/aws-sdk-go-v2/service/sso v1.12.14/go.mod h1:9kfRdJgLCbnyeqZ/DpaSwcgj9ZDYLfRpe8Sze+NrYfQ= +github.com/aws/aws-sdk-go-v2/service/ssooidc v1.14.14 h1:8bEtxV5UT9ucdWGXfZ7CM3caQhSHGjWnTHt0OeF7m7s= +github.com/aws/aws-sdk-go-v2/service/ssooidc v1.14.14/go.mod h1:nd9BG2UnexN2sDx/mk2Jd6pf3d2E61AiA8m8Fdvdx8Y= +github.com/aws/aws-sdk-go-v2/service/sts v1.20.1 h1:U7h9CPoyMfVoN5jUglB0LglCMP10AK4vMBsbsCKM8Yw= +github.com/aws/aws-sdk-go-v2/service/sts v1.20.1/go.mod h1:BUHusg4cOA1TFGegj7x8/eoWrbdHzJfoMrXcbMQAG0k= +github.com/aws/smithy-go v1.13.5 h1:hgz0X/DX0dGqTYpGALqXJoRKRj5oQ7150i5FdTePzO8= +github.com/aws/smithy-go v1.13.5/go.mod h1:Tg+OJXh4MB2R/uN61Ko2f6hTZwB/ZYGOtib8J3gBHzA= +github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q= +github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8= +github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc= +github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= +github.com/cloudsoda/go-smb2 v0.0.0-20231106205947-b0758ecc4c67 h1:KzZU0EMkUm4vX/jPp5d/VttocDpocL/8QP0zyiI9Xiw= +github.com/cloudsoda/go-smb2 v0.0.0-20231106205947-b0758ecc4c67/go.mod h1:xFxVVe3plxwhM+6BgTTPByEgG8hggo8+gtRUkbc5W8Q= +github.com/containerd/containerd v1.7.1 h1:k8DbDkSOwt5rgxQ3uCI4WMKIJxIndSCBUaGm5oRn+Go= +github.com/containerd/containerd v1.7.1/go.mod h1:gA+nJUADRBm98QS5j5RPROnt0POQSMK+r7P7EGMC/Qc= 
+github.com/containerd/continuity v0.3.0 h1:nisirsYROK15TAMVukJOUyGJjz4BNQJBVsNvAXZJ/eg= +github.com/coreos/bbolt v1.3.2/go.mod h1:iRUV2dpdMOn7Bo10OQBFzIJO9kkE559Wcmn+qkEiiKk= +github.com/coreos/etcd v3.3.10+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE= +github.com/coreos/etcd v3.3.13+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE= +github.com/coreos/go-semver v0.2.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk= +github.com/coreos/go-systemd v0.0.0-20190321100706-95778dfbb74e/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4= +github.com/coreos/pkg v0.0.0-20180928190104-399ea9e2e55f/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA= +github.com/cpuguy83/go-md2man/v2 v2.0.0/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU= +github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= +github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ= +github.com/dgryski/go-sip13 v0.0.0-20181026042036-e10d5fee7954/go.mod h1:vAd38F8PWV+bWy6jNmig1y/TA+kYO4g3RSRF0IAv0no= +github.com/docker/distribution v2.8.1+incompatible h1:Q50tZOPR6T/hjNsyc9g8/syEs6bk8XXApsHjKukMl68= +github.com/docker/distribution v2.8.1+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w= +github.com/docker/docker v23.0.6+incompatible h1:aBD4np894vatVX99UTx/GyOUOK4uEcROwA3+bQhEcoU= +github.com/docker/docker v23.0.6+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= +github.com/docker/go-connections v0.4.0 h1:El9xVISelRB7BuFusrZozjnkIM5YnzCViNKohAFqRJQ= +github.com/docker/go-connections v0.4.0/go.mod h1:Gbd7IOopHjR8Iph03tsViu4nIes5XhDvyHbTtUxmeec= +github.com/docker/go-units v0.5.0 h1:69rxXcBk27SvSaaxTtLh/8llcHD8vYHT7WSdRZ/jvr4= +github.com/docker/go-units v0.5.0/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk= +github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo= +github.com/fsnotify/fsnotify v1.6.0 h1:n+5WquG0fcWoWp6xPWfHdbskMCQaFnG6PfBrh1Ky4HY= +github.com/fsnotify/fsnotify v1.6.0/go.mod h1:sl3t1tCWJFWoRz9R8WJCbQihKKwmorjAbSClcnxKAGw= +github.com/geoffgarside/ber v1.1.0 h1:qTmFG4jJbwiSzSXoNJeHcOprVzZ8Ulde2Rrrifu5U9w= +github.com/geoffgarside/ber v1.1.0/go.mod h1:jVPKeCbj6MvQZhwLYsGwaGI52oUorHoHKNecGT85ZCc= +github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= +github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= +github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE= +github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk= +github.com/go-sql-driver/mysql v1.7.1 h1:lUIinVbN1DY0xBg0eMOzmmtGoHwWBbvnWubQUrtU8EI= +github.com/go-sql-driver/mysql v1.7.1/go.mod h1:OXbVy3sEdcQ2Doequ6Z5BW6fXNQTmx+9S1MCJN5yJMI= +github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY= +github.com/go-test/deep v1.1.0 h1:WOcxcdHcvdgThNXjw0t76K42FXTU7HpNQWHpA2HHNlg= +github.com/go-test/deep v1.1.0/go.mod h1:5C2ZWiW0ErCdrYzpqxLbTX7MG14M9iiw8DgHncVwcsE= +github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ= +github.com/gogo/protobuf v1.2.1/go.mod h1:hp+jE20tsWTFYpLwKvXlhS1hjn+gTNwPg2I6zVXpSg4= +github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q= 
+github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q= +github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= +github.com/golang/groupcache v0.0.0-20190129154638-5b532d6fd5ef/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= +github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= +github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= +github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= +github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.8/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= +github.com/google/go-cmp v0.5.9 h1:O2Tfq5qg4qc4AmwVlvv0oLiVAGB7enBSJ2x2DqQFi38= +github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= +github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1 h1:EGx4pi6eqNxGaHF6qqu48+N2wcFQ5qg5FXgOdqsJ5d8= +github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY= +github.com/gorilla/websocket v1.4.0/go.mod h1:E7qHFY5m1UJ88s3WnNqhKjPHQ0heANvMoAMk2YaljkQ= +github.com/grpc-ecosystem/go-grpc-middleware v1.0.0/go.mod h1:FiyG127CGDf3tlThmgyCl78X/SZQqEOJBCDaAfeWzPs= +github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0/go.mod h1:8NvIoxWQoOIhqOTXgfV/d3M/q6VIi02HzZEHgUlZvzk= +github.com/grpc-ecosystem/grpc-gateway v1.9.0/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY= +github.com/hashicorp/hcl v1.0.0 h1:0Anlzjpi4vEasTeNFn2mLJgTSwt0+6sfsiTG8qcWGx4= +github.com/hashicorp/hcl v1.0.0/go.mod h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T20WEQ= +github.com/inconshreveable/mousetrap v1.0.0 h1:Z8tu5sraLXCXIcARxBp/8cbvlwVa7Z1NHg9XEKhtSvM= +github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8= +github.com/jmespath/go-jmespath v0.4.0 h1:BEgLn5cpjn8UN1mAw4NjwDrS35OdebyEtFe+9YPoQUg= +github.com/jmespath/go-jmespath v0.4.0/go.mod h1:T8mJZnbsbmF+m6zOOFylbeCJqk5+pHWvzYPziyZiYoo= +github.com/jmespath/go-jmespath/internal/testify v1.5.1 h1:shLQSRRSCCPj3f2gpwzGwWFoC7ycTf1rcQZHOlsJ6N8= +github.com/jmespath/go-jmespath/internal/testify v1.5.1/go.mod h1:L3OGu8Wl2/fWfCI6z80xFu9LTZmf1ZRjMHUOPmWr69U= +github.com/johannesboyne/gofakes3 v0.0.0-20230506070712-04da935ef877 h1:O7syWuYGzre3s73s+NkgB8e0ZvsIVhT/zxNU7V1gHK8= +github.com/johannesboyne/gofakes3 v0.0.0-20230506070712-04da935ef877/go.mod h1:AxgWC4DDX54O2WDoQO1Ceabtn6IbktjU/7bigor+66g= +github.com/jonboulle/clockwork v0.1.0/go.mod h1:Ii8DK3G1RaLaWxj9trq07+26W01tbo22gdxWY5EU2bo= +github.com/json-iterator/go v1.1.9/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= +github.com/jtolds/gls v4.20.0+incompatible h1:xdiiI2gbIgH/gLH7ADydsJ1uDOEzR8yvV7C0MuV77Wo= +github.com/jtolds/gls v4.20.0+incompatible/go.mod h1:QJZ7F/aHp+rZTRtaJ1ow/lLfFfVYBRgL+9YlvaHOwJU= +github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w= +github.com/kisielk/errcheck v1.1.0/go.mod h1:EZBBE59ingxPouuu3KfxchcWSUPOHkagtvWXihfKN4Q= +github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8= +github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= +github.com/klauspost/compress v1.16.5 
h1:IFV2oUNUzZaz+XyusxpLzpzS8Pt5rh0Z16For/djlyI= +github.com/klauspost/compress v1.16.5/go.mod h1:ntbaceVETuRiXiv4DpjP66DpAtAGkEQskQzEyD//IeE= +github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= +github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc= +github.com/kr/pretty v0.1.0 h1:L/CwN0zerZDmRFUapSPitk6f+Q3+0za1rQkzVuMiMFI= +github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= +github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= +github.com/kr/text v0.1.0 h1:45sCR5RtlFHMR4UwH9sdQ5TC8v0qDQCHnXt+kaKSTVE= +github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= +github.com/magiconair/properties v1.8.0/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ= +github.com/magiconair/properties v1.8.1 h1:ZC2Vc7/ZFkGmsVC9KvOjumD+G5lXy2RtTKyzRKO2BQ4= +github.com/magiconair/properties v1.8.1/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ= +github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0= +github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0= +github.com/mitchellh/mapstructure v1.1.2 h1:fmNYVwqnSfB9mZU6OS2O6GsXM+wcskZDuKQzvN1EDeE= +github.com/mitchellh/mapstructure v1.1.2/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y= +github.com/moby/moby v23.0.6+incompatible h1:Ae0U6PR7n9mdIS7oWCXUqM68tzb09ZVQ3IH8iMoChz0= +github.com/moby/moby v23.0.6+incompatible/go.mod h1:fDXVQ6+S340veQPv35CzDahGBmHsiclFwfEygB/TWMc= +github.com/moby/patternmatcher v0.5.0 h1:YCZgJOeULcxLw1Q+sVR636pmS7sPEn1Qo2iAN6M7DBo= +github.com/moby/patternmatcher v0.5.0/go.mod h1:hDPoyOpDY7OrrMDLaYoY3hf52gNCR/YOUYxkhApJIxc= +github.com/moby/sys/sequential v0.5.0 h1:OPvI35Lzn9K04PBbCLW0g4LcFAJgHsvXsRyewg5lXtc= +github.com/moby/sys/sequential v0.5.0/go.mod h1:tH2cOOs5V9MlPiXcQzRC+eEyab644PWKGRYaaV5ZZlo= +github.com/moby/term v0.5.0 h1:xt8Q1nalod/v7BqbG21f8mQPqH+xAaC9C3N3wfWbVP0= +github.com/moby/term v0.5.0/go.mod h1:8FzsFHVUBGZdbDsJw/ot+X+d5HLUbvklYLJ9uGfcI3Y= +github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= +github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= +github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= +github.com/morikuni/aec v1.0.0 h1:nP9CBfwrvYnBRgY6qfDQkygYDmYwOilePFkwzv4dU8A= +github.com/morikuni/aec v1.0.0/go.mod h1:BbKIizmSmc5MMPqRYbxO4ZU0S0+P200+tUnFx7PXmsc= +github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= +github.com/oklog/ulid v1.3.1/go.mod h1:CirwcVhetQ6Lv90oh/F+FBtV6XMibvdAFo93nm5qn4U= +github.com/opencontainers/go-digest v1.0.0 h1:apOUWs51W5PlhuyGyz9FCeeBIOUDA/6nW8Oi/yOhh5U= +github.com/opencontainers/go-digest v1.0.0/go.mod h1:0JzlMkj0TRzQZfJkVvzbP0HBR3IKzErnv2BNG4W4MAM= +github.com/opencontainers/image-spec v1.1.0-rc2.0.20221005185240-3a7f492d3f1b h1:YWuSjZCQAPM8UUBLkYUk1e+rZcvWHJmFb6i6rM44Xs8= +github.com/opencontainers/image-spec v1.1.0-rc2.0.20221005185240-3a7f492d3f1b/go.mod h1:3OVijpioIKYWTqjiG0zfF6wvoJ4fAXGbjdZuI2NgsRQ= +github.com/opencontainers/runc v1.1.7 h1:y2EZDS8sNng4Ksf0GUYNhKbTShZJPJg1FiXJNH/uoCk= +github.com/opencontainers/runc v1.1.7/go.mod h1:CbUumNnWCuTGFukNXahoo/RFBZvDAgRh/smNYNOhA50= +github.com/pelletier/go-toml v1.2.0/go.mod 
h1:5z9KED0ma1S8pY6P1sdut58dfprrGBbd/94hg7ilaic= +github.com/pelletier/go-toml v1.9.5 h1:4yBQzkHv+7BHq2PQUZF3Mx0IYxG7LsP222s7Agd3ve8= +github.com/pelletier/go-toml v1.9.5/go.mod h1:u1nR/EPcESfeI/szUZKdtJ0xRNbUoANCkoOuaOx1Y+c= +github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= +github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= +github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= +github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= +github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw= +github.com/prometheus/client_golang v0.9.3/go.mod h1:/TN21ttK/J9q6uSwhBd54HahCDft0ttaMvbicHlPoso= +github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= +github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= +github.com/prometheus/common v0.0.0-20181113130724-41aa239b4cce/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro= +github.com/prometheus/common v0.4.0/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= +github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= +github.com/prometheus/procfs v0.0.0-20190507164030-5867b95ac084/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= +github.com/prometheus/tsdb v0.7.1/go.mod h1:qhTCs0VvXwvX/y3TZrWD7rabWM+ijKTux40TwIPHuXU= +github.com/robfig/cron/v3 v3.0.1 h1:WdRxkvbJztn8LMz/QEvLN5sBU+xKpSqwwUO1Pjr4qDs= +github.com/robfig/cron/v3 v3.0.1/go.mod h1:eQICP3HwyT7UooqI/z+Ov+PtYAWygg1TEWWzGIFLtro= +github.com/rogpeppe/fastuuid v0.0.0-20150106093220-6724a57986af/go.mod h1:XWv6SoW27p1b0cqNHllgS5HIMJraePCO15w5zCzIWYg= +github.com/russross/blackfriday/v2 v2.0.1/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= +github.com/ryszard/goskiplist v0.0.0-20150312221310-2dfbae5fcf46 h1:GHRpF1pTW19a8tTFrMLUcfWwyC0pnifVo2ClaLq+hP8= +github.com/ryszard/goskiplist v0.0.0-20150312221310-2dfbae5fcf46/go.mod h1:uAQ5PCi+MFsC7HjREoAz1BU+Mq60+05gifQSsHSDG/8= +github.com/shabbyrobe/gocovmerge v0.0.0-20190829150210-3e036491d500 h1:WnNuhiq+FOY3jNj6JXFT+eLN3CQ/oPIsDPRanvwsmbI= +github.com/shabbyrobe/gocovmerge v0.0.0-20190829150210-3e036491d500/go.mod h1:+njLrG5wSeoG4Ds61rFgEzKvenR2UHbjMoDHsczxly0= +github.com/shurcooL/sanitized_anchor_name v1.0.0/go.mod h1:1NzhyTcUVG4SuEtjjoZeVRXNmyL/1OwPU0+IJeTBvfc= +github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo= +github.com/sirupsen/logrus v1.9.0 h1:trlNQbNUG3OdDrDil03MCb1H2o9nJ1x4/5LYw7byDE0= +github.com/sirupsen/logrus v1.9.0/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ= +github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d h1:zE9ykElWQ6/NYmHa3jpm/yHnI4xSofP+UP6SpjHcSeM= +github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d/go.mod h1:OnSkiWE9lh6wB0YB77sQom3nweQdgAjqCqsofrRNTgc= +github.com/smartystreets/goconvey v1.6.4 h1:fv0U8FUIMPNf1L9lnHLvLhgicrIVChEkdzIKYqbNC9s= +github.com/smartystreets/goconvey v1.6.4/go.mod h1:syvi0/a8iFYH4r/RixwvyeAJjdLS9QV7WQ/tjFTllLA= +github.com/soheilhy/cmux v0.1.4/go.mod h1:IM3LyeVVIOuxMH7sFAkER9+bJ4dT7Ms6E4xg4kGIyLM= +github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA= +github.com/spf13/afero 
v1.1.2/go.mod h1:j4pytiNVoe2o6bmDsKpLACNPDBIoEAkihy7loJ1B0CQ= +github.com/spf13/afero v1.2.1/go.mod h1:9ZxEEn6pIJ8Rxe320qSDBk6AsU0r9pR7Q4OcevTdifk= +github.com/spf13/afero v1.2.2 h1:5jhuqJyZCZf2JRofRvN/nIFgIWNzPa3/Vz8mYylgbWc= +github.com/spf13/afero v1.2.2/go.mod h1:9ZxEEn6pIJ8Rxe320qSDBk6AsU0r9pR7Q4OcevTdifk= +github.com/spf13/cast v1.3.0 h1:oget//CVOEoFewqQxwr0Ej5yjygnqGkvggSE/gB35Q8= +github.com/spf13/cast v1.3.0/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE= +github.com/spf13/cobra v1.0.0 h1:6m/oheQuQ13N9ks4hubMG6BnvwOeaJrqSPLahSnczz8= +github.com/spf13/cobra v1.0.0/go.mod h1:/6GTrnGXV9HjY+aR4k0oJ5tcvakLuG6EuKReYlHNrgE= +github.com/spf13/jwalterweatherman v1.0.0 h1:XHEdyB+EcvlqZamSM4ZOMGlc93t6AcsBEu9Gc1vn7yk= +github.com/spf13/jwalterweatherman v1.0.0/go.mod h1:cQK4TGJAtQXfYWX+Ddv3mKDzgVb68N+wFjFa4jdeBTo= +github.com/spf13/pflag v1.0.3/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= +github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA= +github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= +github.com/spf13/viper v1.4.0/go.mod h1:PTJ7Z/lr49W6bUbkmS1V3by4uWynFiR9p7+dSq/yZzE= +github.com/spf13/viper v1.6.3 h1:pDDu1OyEDTKzpJwdq4TiuLyMsUgRa/BT5cn5O62NoHs= +github.com/spf13/viper v1.6.3/go.mod h1:jUMtyi0/lB5yZH/FjyGAoH7IMNrIhlBf6pXZmbMDvzw= +github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw= +github.com/stretchr/objx v0.5.0 h1:1zr/of2m5FGMsad5YfcqgdqdWrIhu+EBEJRhR1U7z/c= +github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo= +github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= +github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= +github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA= +github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= +github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= +github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= +github.com/stretchr/testify v1.8.4 h1:CcVxjf3Q8PM0mHUKJCdn+eZZtm5yQwehR5yeSVQQcUk= +github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo= +github.com/subosito/gotenv v1.2.0 h1:Slr1R9HxAlEKefgq5jn9U+DnETlIUa6HfgEzj0g5d7s= +github.com/subosito/gotenv v1.2.0/go.mod h1:N0PQaV/YGNqwC0u51sEeR/aUtSLEXKX9iv69rRypqCw= +github.com/tmc/grpc-websocket-proxy v0.0.0-20190109142713-0ad062ec5ee5/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U= +github.com/ugorji/go v1.1.4/go.mod h1:uQMGLiO92mf5W77hV/PUCpI3pbzQx3CRekS0kk+RGrc= +github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2/go.mod h1:UETIi67q53MR2AWcXfiuqkDkRtnGDLqkBTpCHuJHxtU= +github.com/xordataexchange/crypt v0.0.3-0.20170626215501-b2862e3d0a77/go.mod h1:aYKd//L2LvnjZzWKhF00oedf4jCCReLcmhLdhm1A27Q= +github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= +github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= +github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY= +go.etcd.io/bbolt v1.3.2/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU= +go.etcd.io/bbolt v1.3.5/go.mod h1:G5EMThwa9y8QZGBClrRx5EY+Yw9kAhnjy3bSjsnlVTQ= 
+go.uber.org/atomic v1.4.0/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE= +go.uber.org/multierr v1.1.0/go.mod h1:wR5kodmAFQ0UK8QlbwjlSNy0Z68gJhDJUG5sjR94q/0= +go.uber.org/zap v1.10.0/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q= +golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= +golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= +golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= +golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= +golang.org/x/crypto v0.9.0 h1:LF6fAI+IutBocDJ2OT0Q1g8plpYljMZ4+lty+dsqw3g= +golang.org/x/crypto v0.9.0/go.mod h1:yrmDGqONDYtNj3tH8X9dzUun2m2lzPa9ngI6/RUPGR0= +golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= +golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= +golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4= +golang.org/x/mod v0.8.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= +golang.org/x/mod v0.10.0 h1:lFO9qtOdlre5W1jxS3r/4szv2/6iXxScdzjoBMXNhYk= +golang.org/x/mod v0.10.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= +golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20181220203305-927f97764cc3/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190522155817-f3200d17e092/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks= +golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= +golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= +golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= +golang.org/x/net v0.1.0/go.mod h1:Cx3nUiGt4eDBEyega/BKRp+/AlGL8hYe7U9odMt2Cco= +golang.org/x/net v0.6.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs= +golang.org/x/net v0.9.0/go.mod h1:d48xBJpPfHeWQsugry2m+kC02ZBRGRgulfHnEXEuWns= +golang.org/x/net v0.10.0 h1:X2//UzNDwYmtCLn7To6G58Wr6f5ahEAQgKNzv9Y951M= +golang.org/x/net v0.10.0/go.mod h1:0qNGK6F8kojg2nk9dLZ2mShWaEBan6FAoqfSigmmuDg= +golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= +golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod 
h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.1.0 h1:wsuoTGHzEhffawBOhz5CYhcrV4IdKZbEyZjBMuTp12o= +golang.org/x/sync v0.1.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20181107165924-66b7b1311ac8/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200202164722-d101bd2416d5/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210119212857-b64e53b001e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220908164124-27713097b956/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.1.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.7.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.8.0 h1:EBmGv8NaZBZTWvrbjNoL6HVt+IVy3QDQpJs7VRIw3tU= +golang.org/x/sys v0.8.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= +golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= +golang.org/x/term v0.1.0/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= +golang.org/x/term v0.5.0/go.mod h1:jMB1sMXY+tzblOD4FWmEbocvup2/aLOaQEp7JmGp78k= +golang.org/x/term v0.7.0/go.mod h1:P32HKFT3hSsZrRxla30E9HqToFYAQPCMs/zFMBUFqPY= +golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= +golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= +golang.org/x/text v0.4.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= 
+golang.org/x/text v0.7.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= +golang.org/x/text v0.9.0 h1:2sjJmO8cDvYveuX97RDLsxlyUxLl+GHoLxBiRdHllBE= +golang.org/x/text v0.9.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8= +golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/time v0.0.0-20220210224613-90d013bbcef8 h1:vVKdlvoWBphwdxWKrFZEuM0kGgGLxUOYcY4U/2Vjg44= +golang.org/x/tools v0.0.0-20180221164845-07fd8470d635/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190328211700-ab21143f2384/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190829051458-42f498d34c4d/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +golang.org/x/tools v0.1.0/go.mod h1:xkSsbof2nBLbhDlRMhhhyNLN/zl3eTqcnHD5viDpcZ0= +golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc= +golang.org/x/tools v0.6.0/go.mod h1:Xwgl3UAJ/d3gWutnCtw505GrjyAbvKui8lOU390QaIU= +golang.org/x/tools v0.8.0 h1:vSDcovVPld282ceKgDimkRSC8kpaH1dgyc9UMzlt84Y= +golang.org/x/tools v0.8.0/go.mod h1:JxBZ99ISMI5ViVkT1tr6tdNmXeTrcpVSD3vZ1RsRdN4= +golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= +google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= +google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= +google.golang.org/grpc v1.21.0/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM= +gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw= +gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127 h1:qIbj1fsPNlZgppZ+VLlY7N33q108Sa+fhmuc+sWQYwY= +gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/ini.v1 v1.51.0 h1:AQvPpx3LzTDM0AjnIRlVFwFFGC+npRopjZxLJj6gdno= +gopkg.in/ini.v1 v1.51.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k= +gopkg.in/mgo.v2 v2.0.0-20180705113604-9856a29383ce/go.mod h1:yeKp02qBN3iKW1OzL3MGk2IdtZzaj7SFntXj72NppTA= +gopkg.in/resty.v1 v1.12.0/go.mod h1:mDo4pnntr5jdWRML875a/NmxYqAlA73dVijT2AXvQQo= +gopkg.in/yaml.v2 v2.0.0-20170812160011-eb3733d160e7/go.mod h1:JAlM8MvJe8wmxCU4Bli9HhUf9+ttbYbLASfIpnQbh74= +gopkg.in/yaml.v2 
v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY= +gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ= +gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= +gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +gotest.tools/v3 v3.4.0 h1:ZazjZUfuVeZGLAmlKKuyv3IKP5orXcwtOwDQH6YVr6o= +gotest.tools/v3 v3.4.0/go.mod h1:CtbdzLSsqVhDgMtKsx03ird5YTGB3ar27v0u/yKBW5g= +honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= diff --git a/main.go b/main.go new file mode 100644 index 00000000..fffd0b8e --- /dev/null +++ b/main.go @@ -0,0 +1,9 @@ +package main + +import ( + "github.com/databacker/mysql-backup/cmd" +) + +func main() { + cmd.Execute() +} diff --git a/pkg/archive/tar.go b/pkg/archive/tar.go new file mode 100644 index 00000000..9bf15afd --- /dev/null +++ b/pkg/archive/tar.go @@ -0,0 +1,127 @@ +package archive + +import ( + "archive/tar" + "fmt" + "io" + "os" + "path/filepath" + "strings" +) + +func Tar(src string, writer io.WriteCloser) error { + + // ensure the src actually exists before trying to tar it + if _, err := os.Stat(src); err != nil { + return fmt.Errorf("unable to tar files - %v", err.Error()) + } + + tw := tar.NewWriter(writer) + // defers are executed via a stack, so LIFO + // important we close the tw before the underlying writer + defer writer.Close() + defer tw.Close() + + // walk path + return filepath.Walk(src, func(file string, fi os.FileInfo, err error) error { + + // return on any error + if err != nil { + return err + } + + // return on non-regular files (thanks to [kumo](https://medium.com/@komuw/just-like-you-did-fbdd7df829d3) for this suggested update) + if !fi.Mode().IsRegular() { + return nil + } + + // create a new dir/file header + header, err := tar.FileInfoHeader(fi, fi.Name()) + if err != nil { + return err + } + + // update the name to correctly reflect the desired destination when untaring + header.Name = strings.TrimPrefix(strings.Replace(file, src, "", -1), string(filepath.Separator)) + + // write the header + if err := tw.WriteHeader(header); err != nil { + return err + } + + // open files for taring + f, err := os.Open(file) + if err != nil { + return err + } + + // copy file data into tar writer + if _, err := io.Copy(tw, f); err != nil { + return err + } + + // manually close here after each file operation; defering would cause each file close + // to wait until all operations have completed. 
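+		// note: the return value of Close is not checked; the file was opened read-only,
+		// so a failed close cannot lose data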
+ f.Close() + + return nil + }) +} + +func Untar(r io.Reader, dst string) error { + tr := tar.NewReader(r) + + for { + header, err := tr.Next() + + switch { + + // if no more files are found return + case err == io.EOF: + return nil + + // return any other error + case err != nil: + return err + + // if the header is nil, just skip it (not sure how this happens) + case header == nil: + continue + } + + // the target location where the dir/file should be created + target := filepath.Join(dst, header.Name) + + // the following switch could also be done using fi.Mode(), not sure if there + // a benefit of using one vs. the other. + // fi := header.FileInfo() + + // check the file type + switch header.Typeflag { + + // if its a dir and it doesn't exist create it + case tar.TypeDir: + if _, err := os.Stat(target); err != nil { + if err := os.MkdirAll(target, 0755); err != nil { + return err + } + } + + // if it's a file create it + case tar.TypeReg: + f, err := os.OpenFile(target, os.O_CREATE|os.O_RDWR, os.FileMode(header.Mode)) + if err != nil { + return err + } + + // copy over contents + if _, err := io.Copy(f, tr); err != nil { + return err + } + + // manually close here after each file operation; defering would cause each file close + // to wait until all operations have completed. + f.Close() + } + } +} diff --git a/pkg/compression/compressor.go b/pkg/compression/compressor.go new file mode 100644 index 00000000..108df7a2 --- /dev/null +++ b/pkg/compression/compressor.go @@ -0,0 +1,21 @@ +package compression + +import ( + "fmt" + "io" +) + +type Compressor interface { + Uncompress(in io.Reader) (io.Reader, error) + Compress(out io.Writer) io.WriteCloser + Extension() string +} + +func GetCompressor(name string) (Compressor, error) { + switch name { + case "gzip": + return &GzipCompressor{}, nil + default: + return nil, fmt.Errorf("unknown compression format: %s", name) + } +} diff --git a/pkg/compression/gzip.go b/pkg/compression/gzip.go new file mode 100644 index 00000000..3134031c --- /dev/null +++ b/pkg/compression/gzip.go @@ -0,0 +1,20 @@ +package compression + +import ( + "compress/gzip" + "io" +) + +type GzipCompressor struct { +} + +func (g *GzipCompressor) Uncompress(in io.Reader) (io.Reader, error) { + return gzip.NewReader(in) +} + +func (g *GzipCompressor) Compress(out io.Writer) io.WriteCloser { + return gzip.NewWriter(out) +} +func (g *GzipCompressor) Extension() string { + return "tgz" +} diff --git a/pkg/config/type.go b/pkg/config/type.go new file mode 100644 index 00000000..b69cdecb --- /dev/null +++ b/pkg/config/type.go @@ -0,0 +1,233 @@ +package config + +import ( + "fmt" + + "github.com/databacker/mysql-backup/pkg/storage" + "github.com/databacker/mysql-backup/pkg/storage/credentials" + "github.com/databacker/mysql-backup/pkg/storage/s3" + "github.com/databacker/mysql-backup/pkg/storage/smb" + "github.com/databacker/mysql-backup/pkg/util" + "gopkg.in/yaml.v3" +) + +type logLevel string + +//nolint:unused // we expect to use these going forward +const ( + configType = "config.databack.io" + version = "1" + + logLevelError logLevel = "error" + logLevelWarning logLevel = "warning" + logLevelInfo logLevel = "info" + logLevelDebug logLevel = "debug" + logLevelTrace logLevel = "trace" + logLevelDefault logLevel = logLevelInfo +) + +type Config struct { + Type string `yaml:"type"` + Version string `yaml:"version"` + Logging logLevel `yaml:"logging"` + Dump Dump `yaml:"dump"` + Restore Restore `yaml:"restore"` + Database Database `yaml:"database"` + Targets Targets 
`yaml:"targets"` +} + +type Dump struct { + Include []string `yaml:"include"` + Exclude []string `yaml:"exclude"` + Safechars bool `yaml:"safechars"` + NoDatabaseName bool `yaml:"no-database-name"` + Schedule Schedule `yaml:"schedule"` + Compression string `yaml:"compression"` + Compact bool `yaml:"compact"` + MaxAllowedPacket int `yaml:"max-allowed-packet"` + TmpPath string `yaml:"tmp-path"` + FilenamePattern string `yaml:"filename-pattern"` + Scripts BackupScripts `yaml:"scripts"` + Targets []string `yaml:"targets"` +} + +type Schedule struct { + Once bool `yaml:"once"` + Cron string `yaml:"cron"` + Frequency int `yaml:"frequency"` + Begin string `yaml:"begin"` +} + +type BackupScripts struct { + PreBackup string `yaml:"pre-backup"` + PostBackup string `yaml:"post-backup"` +} + +type Restore struct { + Scripts RestoreScripts `yaml:"scripts"` +} + +type RestoreScripts struct { + PreRestore string `yaml:"pre-restore"` + PostRestore string `yaml:"post-restore"` +} + +type Database struct { + Server string `yaml:"server"` + Port int `yaml:"port"` + Credentials DBCredentials `yaml:"credentials"` +} + +type DBCredentials struct { + Username string `yaml:"username"` + Password string `yaml:"password"` +} + +type Targets map[string]Target + +type Target interface { + Type() string + URL() string + Storage() (storage.Storage, error) // convert to a storage.Storage instance +} + +func (t *Targets) UnmarshalYAML(unmarshal func(interface{}) error) error { + tmpTargets := struct { + Targets map[string]yaml.Node `yaml:"targets"` + }{} + if err := unmarshal(&tmpTargets); err != nil { + return err + } + for key, yamlTarget := range tmpTargets.Targets { + tmpT := struct { + Type string `yaml:"type"` + URL string `yaml:"url"` + }{} + if err := yamlTarget.Decode(&tmpT); err != nil { + return err + } + switch tmpT.Type { + case "s3": + var s3Target S3Target + if err := yamlTarget.Decode(&s3Target); err != nil { + return err + } + s3Target.targetType = tmpT.Type + s3Target.url = tmpT.URL + (*t)[key] = s3Target + case "smb": + var smbTarget SMBTarget + if err := yamlTarget.Decode(&smbTarget); err != nil { + return err + } + smbTarget.targetType = tmpT.Type + smbTarget.url = tmpT.URL + (*t)[key] = smbTarget + case "file": + var fileTarget FileTarget + if err := yamlTarget.Decode(&fileTarget); err != nil { + return err + } + fileTarget.targetType = tmpT.Type + fileTarget.url = tmpT.URL + (*t)[key] = fileTarget + default: + return fmt.Errorf("unknown target type: %s", tmpT.Type) + } + + } + return nil +} + +type S3Target struct { + targetType string `yaml:"type"` + url string `yaml:"url"` + Region string `yaml:"region"` + Endpoint string `yaml:"endpoint"` + Credentials AWSCredentials `yaml:"credentials"` +} + +func (s S3Target) Type() string { + return s.targetType +} +func (s S3Target) URL() string { + return s.url +} +func (s S3Target) Storage() (storage.Storage, error) { + u, err := util.SmartParse(s.url) + if err != nil { + return nil, fmt.Errorf("invalid target url%v", err) + } + opts := []s3.Option{} + if s.Region != "" { + opts = append(opts, s3.WithRegion(s.Region)) + } + if s.Endpoint != "" { + opts = append(opts, s3.WithEndpoint(s.Endpoint)) + } + if s.Credentials.AccessKeyId != "" { + opts = append(opts, s3.WithAccessKeyId(s.Credentials.AccessKeyId)) + } + if s.Credentials.SecretAccessKey != "" { + opts = append(opts, s3.WithSecretAccessKey(s.Credentials.SecretAccessKey)) + } + store := s3.New(*u, opts...) 
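+ // note: s3.New only records the parsed URL and options here; the actual AWS client is built lazily when Push or Pull is called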
+ return store, nil +} + +type AWSCredentials struct { + AccessKeyId string `yaml:"access-key-id"` + SecretAccessKey string `yaml:"secret-access-key"` +} + +type SMBTarget struct { + targetType string `yaml:"type"` + url string `yaml:"url"` + Credentials SMBCredentials `yaml:"credentials"` +} + +func (s SMBTarget) Type() string { + return s.targetType +} +func (s SMBTarget) URL() string { + return s.url +} +func (s SMBTarget) Storage() (storage.Storage, error) { + u, err := util.SmartParse(s.url) + if err != nil { + return nil, fmt.Errorf("invalid target url%v", err) + } + opts := []smb.Option{} + if s.Credentials.Domain != "" { + opts = append(opts, smb.WithDomain(s.Credentials.Domain)) + } + if s.Credentials.Username != "" { + opts = append(opts, smb.WithUsername(s.Credentials.Username)) + } + if s.Credentials.Password != "" { + opts = append(opts, smb.WithPassword(s.Credentials.Password)) + } + store := smb.New(*u, opts...) + return store, nil +} + +type SMBCredentials struct { + Domain string `yaml:"domain"` + Username string `yaml:"username"` + Password string `yaml:"password"` +} + +type FileTarget struct { + targetType string `yaml:"type"` + url string `yaml:"url"` +} + +func (f FileTarget) Type() string { + return f.targetType +} +func (f FileTarget) URL() string { + return f.url +} +func (f FileTarget) Storage() (storage.Storage, error) { + return storage.ParseURL(f.url, credentials.Creds{}) +} diff --git a/pkg/core/dump.go b/pkg/core/dump.go new file mode 100644 index 00000000..141b63e1 --- /dev/null +++ b/pkg/core/dump.go @@ -0,0 +1,244 @@ +package core + +import ( + "fmt" + "os" + "path" + "path/filepath" + "strings" + "time" + + log "github.com/sirupsen/logrus" + + "github.com/databacker/mysql-backup/pkg/archive" + "github.com/databacker/mysql-backup/pkg/database" +) + +const ( + sourceRenameCmd = "/scripts.d/source.sh" + targetRenameCmd = "/scripts.d/target.sh" +) + +// TimerDump runs a dump on a timer +func TimerDump(opts DumpOptions, timerOpts TimerOptions) error { + c, err := Timer(timerOpts) + if err != nil { + log.Errorf("error creating timer: %v", err) + os.Exit(1) + } + // block and wait for it + for update := range c { + if err := Dump(opts); err != nil { + return fmt.Errorf("error backing up: %w", err) + } + if update.Last { + break + } + } + return nil +} + +// Dump run a single dump, based on the provided opts +func Dump(opts DumpOptions) error { + targets := opts.Targets + safechars := opts.Safechars + dbnames := opts.DBNames + dbconn := opts.DBConn + compressor := opts.Compressor + compact := opts.Compact + suppressUseDatabase := opts.SuppressUseDatabase + maxAllowedPacket := opts.MaxAllowedPacket + + now := time.Now() + timepart := now.Format(time.RFC3339) + log.Infof("beginning dump %s", timepart) + if safechars { + timepart = strings.ReplaceAll(timepart, ":", "-") + } + + // sourceFilename: file that the uploader looks for when performing the upload + // targetFilename: the remote file that is actually uploaded + sourceFilename := fmt.Sprintf("db_backup_%s.%s", timepart, compressor.Extension()) + targetFilename := sourceFilename + + // create a temporary working directory + tmpdir, err := os.MkdirTemp("", "databacker_backup") + if err != nil { + return fmt.Errorf("failed to make temporary working directory: %v", err) + } + defer os.RemoveAll(tmpdir) + // execute pre-backup scripts if any + if err := preBackup(timepart, path.Join(tmpdir, targetFilename), tmpdir, opts.PreBackupScripts, log.GetLevel() == log.DebugLevel); err != nil { + return fmt.Errorf("error 
running pre-backup scripts: %v", err) + } + + // do the dump(s) + workdir, err := os.MkdirTemp("", "databacker_cache") + if err != nil { + return fmt.Errorf("failed to make temporary cache directory: %v", err) + } + defer os.RemoveAll(workdir) + + dw := make([]database.DumpWriter, 0) + + // do we split the output by schema, or one big dump file? + if len(dbnames) == 0 { + if dbnames, err = database.GetSchemas(dbconn); err != nil { + return fmt.Errorf("failed to list database schemas: %v", err) + } + } + for _, s := range dbnames { + outFile := path.Join(workdir, fmt.Sprintf("%s_%s.sql", s, timepart)) + f, err := os.Create(outFile) + if err != nil { + return fmt.Errorf("failed to create dump file '%s': %v", outFile, err) + } + dw = append(dw, database.DumpWriter{ + Schemas: []string{s}, + Writer: f, + }) + } + if err := database.Dump(dbconn, database.DumpOpts{ + Compact: compact, + SuppressUseDatabase: suppressUseDatabase, + MaxAllowedPacket: maxAllowedPacket, + }, dw); err != nil { + return fmt.Errorf("failed to dump database: %v", err) + } + + // create the tar writer to archive it all together + // note: we tar up workdir and write the archive into tmpdir, so the archive cannot end up trying to include itself + outFile := path.Join(tmpdir, sourceFilename) + f, err := os.OpenFile(outFile, os.O_CREATE|os.O_WRONLY, 0o644) + if err != nil { + return fmt.Errorf("failed to open output file '%s': %v", outFile, err) + } + defer f.Close() + cw := compressor.Compress(f) + if err := archive.Tar(workdir, cw); err != nil { + return fmt.Errorf("error creating the compressed archive: %v", err) + } + // we need to close it explicitly before moving ahead + f.Close() + + // execute post-backup scripts if any + if err := postBackup(timepart, path.Join(tmpdir, targetFilename), tmpdir, opts.PostBackupScripts, log.GetLevel() == log.DebugLevel); err != nil { + return fmt.Errorf("error running post-backup scripts: %v", err) + } + + // perform any renaming of the source file + newName, err := renameSource(timepart, path.Join(tmpdir, targetFilename), tmpdir, log.GetLevel() == log.DebugLevel) + if err != nil { + return fmt.Errorf("failed to rename source: %v", err) + } + if newName != "" { + sourceFilename = newName + } + + // perform any renaming of the target file + newName, err = renameTarget(timepart, path.Join(tmpdir, targetFilename), tmpdir, log.GetLevel() == log.DebugLevel) + if err != nil { + return fmt.Errorf("failed to rename target: %v", err) + } + if newName != "" { + targetFilename = newName + } + + // upload to each destination + for _, t := range targets { + log.Debugf("uploading via protocol %s from %s", t.Protocol(), targetFilename) + copied, err := t.Push(targetFilename, filepath.Join(tmpdir, sourceFilename)) + if err != nil { + return fmt.Errorf("failed to push file: %v", err) + } + log.Debugf("completed copying %d bytes", copied) + } + + return nil +} + +// run pre-backup scripts, if they exist +func preBackup(timestamp, dumpfile, dumpdir, preBackupDir string, debug bool) error { + // construct any additional environment + env := map[string]string{ + "NOW": timestamp, + "DUMPFILE": dumpfile, + "DUMPDIR": dumpdir, + "DB_DUMP_DEBUG": fmt.Sprintf("%v", debug), + } + return runScripts(preBackupDir, env) +} + +func postBackup(timestamp, dumpfile, dumpdir, postBackupDir string, debug bool) error { + // construct any additional environment + env := map[string]string{ + "NOW": timestamp, + "DUMPFILE": dumpfile, + "DUMPDIR": dumpdir, + "DB_DUMP_DEBUG": fmt.Sprintf("%v", debug), + } + return runScripts(postBackupDir, env) +} + +func renameSource(timestamp, dumpfile, dumpdir string, debug bool) (string, error) { + _,
err := os.Stat(sourceRenameCmd) + if err != nil && os.IsNotExist(err) { + return "", nil + } + if err != nil { + return "", fmt.Errorf("error reading rename script %s: %v", sourceRenameCmd, err) + } + env := map[string]string{ + "NOW": timestamp, + "DUMPFILE": path.Join(dumpdir, dumpfile), + "DUMPDIR": dumpdir, + "DB_DUMP_DEBUG": fmt.Sprintf("%v", debug), + } + + // it exists so try to run it + results, err := oneScript(sourceRenameCmd, env) + if err != nil { + return "", fmt.Errorf("error executing rename script %s: %v", sourceRenameCmd, err) + } + results = trimBadChars(results) + newName := strings.TrimSpace(string(results)) + + return newName, nil +} + +func renameTarget(timestamp, dumpfile, dumpdir string, debug bool) (string, error) { + _, err := os.Stat(targetRenameCmd) + if err != nil && os.IsNotExist(err) { + return "", nil + } + if err != nil { + return "", fmt.Errorf("error reading rename script %s: %v", targetRenameCmd, err) + } + env := map[string]string{ + "NOW": timestamp, + "DUMPFILE": path.Join(dumpdir, dumpfile), + "DUMPDIR": dumpdir, + "DB_DUMP_DEBUG": fmt.Sprintf("%v", debug), + } + + // it exists so try to run it + results, err := oneScript(targetRenameCmd, env) + if err != nil { + return "", fmt.Errorf("error executing rename script %s: %v", targetRenameCmd, err) + } + results = trimBadChars(results) + newName := strings.TrimSpace(string(results)) + + return newName, nil +} + +// trimBadChars eliminates the whitespace characters '\040\011\012\015' (space, tab, LF, CR) +func trimBadChars(b []byte) []byte { + out := make([]byte, 0) + for _, c := range b { + if c != 040 && c != 011 && c != 012 && c != 015 { + out = append(out, c) + } + } + return out +} diff --git a/pkg/core/dumpoptions.go b/pkg/core/dumpoptions.go new file mode 100644 index 00000000..4a14f347 --- /dev/null +++ b/pkg/core/dumpoptions.go @@ -0,0 +1,21 @@ +package core + +import ( + "github.com/databacker/mysql-backup/pkg/compression" + "github.com/databacker/mysql-backup/pkg/database" + "github.com/databacker/mysql-backup/pkg/storage" +) + +type DumpOptions struct { + Targets []storage.Storage + Safechars bool + DBNames []string + DBConn database.Connection + Compressor compression.Compressor + Exclude []string + PreBackupScripts string + PostBackupScripts string + Compact bool + SuppressUseDatabase bool + MaxAllowedPacket int +} diff --git a/pkg/core/restore.go b/pkg/core/restore.go new file mode 100644 index 00000000..6f36fa0e --- /dev/null +++ b/pkg/core/restore.go @@ -0,0 +1,105 @@ +package core + +import ( + "fmt" + "io" + "os" + "path" + + log "github.com/sirupsen/logrus" + + "github.com/databacker/mysql-backup/pkg/archive" + "github.com/databacker/mysql-backup/pkg/compression" + "github.com/databacker/mysql-backup/pkg/database" + "github.com/databacker/mysql-backup/pkg/storage" +) + +const ( + preRestoreDir = "/scripts.d/pre-restore" + postRestoreDir = "/scripts.d/post-restore" + tmpRestoreFile = "/tmp/restorefile" +) + +// Restore restores a specific backup into the database +func Restore(target storage.Storage, targetFile string, dbconn database.Connection, databasesMap map[string]string, compressor compression.Compressor) error { + log.Info("beginning restore") + // execute pre-restore scripts if any + if err := preRestore(target.URL()); err != nil { + return fmt.Errorf("error running pre-restore: %v", err) + } + + log.Debugf("restoring via %s protocol, temporary file location %s", target.Protocol(), tmpRestoreFile) + + copied, err := target.Pull(targetFile, tmpRestoreFile) + if err != nil { + return fmt.Errorf("failed to pull
target %s: %v", target, err) + } + log.Debugf("completed copying %d bytes", copied) + + // successfully downloaded the file; now restore it + tmpdir, err := os.MkdirTemp("", "restore") + if err != nil { + return fmt.Errorf("unable to create temporary working directory: %v", err) + } + defer os.RemoveAll(tmpdir) + f, err := os.Open(tmpRestoreFile) + if err != nil { + return fmt.Errorf("unable to read the temporary download file: %v", err) + } + defer f.Close() + os.Remove(tmpRestoreFile) + + // create the tar reader to extract the files into the directory + cr, err := compressor.Uncompress(f) + if err != nil { + return fmt.Errorf("unable to create an uncompressor: %v", err) + } + if err := archive.Untar(cr, tmpdir); err != nil { + return fmt.Errorf("error extracting the file: %v", err) + } + + // run through each file and apply it + files, err := os.ReadDir(tmpdir) + if err != nil { + return fmt.Errorf("failed to find extracted files to restore: %v", err) + } + readers := make([]io.ReadSeeker, 0) + for _, f := range files { + // ignore directories + if f.IsDir() { + continue + } + file, err := os.Open(path.Join(tmpdir, f.Name())) + if err != nil { + continue + } + defer file.Close() + readers = append(readers, file) + } + if err := database.Restore(dbconn, databasesMap, readers); err != nil { + return fmt.Errorf("failed to restore database: %v", err) + } + + // execute post-restore scripts if any + if err := postRestore(target.URL()); err != nil { + return fmt.Errorf("error running post-restore: %v", err) + } + return nil +} + +// run pre-restore scripts, if they exist +func preRestore(target string) error { + // construct any additional environment + env := map[string]string{ + "DB_RESTORE_TARGET": target, + } + return runScripts(preRestoreDir, env) +} + +func postRestore(target string) error { + // construct any additional environment + env := map[string]string{ + "DB_RESTORE_TARGET": target, + } + return runScripts(postRestoreDir, env) +} diff --git a/pkg/core/scripts.go b/pkg/core/scripts.go new file mode 100644 index 00000000..afc2b5fe --- /dev/null +++ b/pkg/core/scripts.go @@ -0,0 +1,56 @@ +package core + +import ( + "bytes" + "fmt" + "os" + "os/exec" + "path" +) + +func runScripts(dir string, env map[string]string) error { + files, err := os.ReadDir(dir) + // if the directory does not exist, do not worry about it + if err != nil && os.IsNotExist(err) { + return nil + } + for _, f := range files { + // ignore directories and any files we cannot execute + fi, err := f.Info() + if err != nil { + return fmt.Errorf("error getting file info %s: %v", f.Name(), err) + } + if f.IsDir() || fi.Mode()&0111 == 0 { + continue + } + // execute the file + envSlice := os.Environ() + for k, v := range env { + envSlice = append(envSlice, fmt.Sprintf("%s=%s", k, v)) + } + cmd := exec.Command(path.Join(dir, f.Name())) + cmd.Env = envSlice + if err := cmd.Run(); err != nil { + return fmt.Errorf("error running file %s: %v", f.Name(), err) + } + } + return nil +} + +func oneScript(target string, env map[string]string) ([]byte, error) { + // execute the file + envSlice := os.Environ() + for k, v := range env { + envSlice = append(envSlice, fmt.Sprintf("%s=%s", k, v)) + } + cmd := exec.Command(target) + cmd.Env = envSlice + var stdout bytes.Buffer + cmd.Stdout = &stdout + cmd.Stderr = os.Stderr + + if err := cmd.Run(); err != nil { + return nil, fmt.Errorf("error running file %s: %v", target, err) + } + return stdout.Bytes(), nil +} diff --git a/pkg/core/timer.go b/pkg/core/timer.go new file mode 100644 index
00000000..37872b77 --- /dev/null +++ b/pkg/core/timer.go @@ -0,0 +1,164 @@ +package core + +import ( + "errors" + "fmt" + "regexp" + "strconv" + "time" + + "github.com/robfig/cron/v3" +) + +type TimerOptions struct { + Once bool + Cron string + Begin string + Frequency int +} + +type Update struct { + // Last whether or not this is the last update, and no more will be coming. + // If true, perform this action and then end. + Last bool +} + +func sendTimer(c chan Update, last bool) { + // make the channel write non-blocking + select { + case c <- Update{Last: last}: + default: + } +} + +// Time start a timer that tells when to run an activity, based on its options. +// Each time to run an activity is indicated via a message in a channel. +func Timer(opts TimerOptions) (<-chan Update, error) { + var ( + delay time.Duration + err error + ) + + now := time.Now() + + // validate we do not have conflicting options + if opts.Once && (opts.Cron != "" || opts.Begin != "" || opts.Frequency != 0) { + return nil, errors.New("option 'Once' is exclusive and must not be used with Begin, Cron or Frequency") + } + + if opts.Cron != "" && (opts.Begin != "" || opts.Frequency != 0) { + return nil, errors.New("option 'Cron' is exclusive and must not be used with Begin, Once or Frequency") + } + + // parse the options to determine our delays + if opts.Cron != "" { + // calculate delay until next cron moment as defined + delay, err = waitForCron(opts.Cron, now) + if err != nil { + return nil, fmt.Errorf("invalid cron format '%s': %v", opts.Cron, err) + } + } + if opts.Begin != "" { + // calculate how long to wait + minsRe, err := regexp.Compile(`^\+([0-9]+)$`) + if err != nil { + return nil, fmt.Errorf("invalid matcher for checking begin delay options: %v", err) + } + timeRe, err := regexp.Compile(`([0-9][0-9])([0-9][0-9])`) + if err != nil { + return nil, fmt.Errorf("invalid matcher for checking begin delay options: %v", err) + } + + // first look for +MM, which means delay MM minutes + delayMinsParts := minsRe.FindStringSubmatch(opts.Begin) + startTimeParts := timeRe.FindStringSubmatch(opts.Begin) + + switch { + case len(delayMinsParts) > 1: + delayMins, err := strconv.Atoi(delayMinsParts[1]) + if err != nil { + return nil, fmt.Errorf("invalid format for begin delay '%s': %v", opts.Begin, err) + } + delay = time.Duration(delayMins) * time.Minute + case len(startTimeParts) > 3: + hour, err := strconv.Atoi(startTimeParts[1]) + if err != nil { + return nil, fmt.Errorf("invalid format for begin delay '%s': %v", opts.Begin, err) + } + minute, err := strconv.Atoi(startTimeParts[2]) + if err != nil { + return nil, fmt.Errorf("invalid format for begin delay '%s': %v", opts.Begin, err) + } + + // convert that start time into a Duration to wait + now := time.Now() + + today := time.Date(now.Year(), now.Month(), now.Day(), hour, minute, now.Second(), now.Nanosecond(), time.UTC) + if today.After(now) { + delay = today.Sub(now) + } else { + // add one day + delay = today.Add(24 * time.Hour).Sub(now) + } + default: + return nil, fmt.Errorf("invalid format for begin delay '%s': %v", opts.Begin, err) + } + } + + // if delayMins is 0, this will do nothing, so it does not hurt + time.Sleep(delay) + + c := make(chan Update) + go func(opts TimerOptions) { + // when this goroutine ends, close the channel + defer close(c) + + // if once, ignore all delays and go + if opts.Once { + sendTimer(c, true) + return + } + + // create our delay and timer loop and go + for { + lastRun := time.Now() + + // not once - run the first backup + 
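+ // (each loop iteration signals one backup run, then sleeps until the next cron match or frequency window)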
sendTimer(c, false) + + if opts.Cron != "" { + delay, _ = waitForCron(opts.Cron, now) + } else { + // calculate how long until the next run + // just take our last start time, and add the frequency until it is past our + // current time. We cannot just take the last time and add, + // because it might have been during a backup run + now := time.Now() + diff := int(now.Sub(lastRun).Minutes()) + // make sure we at least wait one full frequency + if diff == 0 { + diff += opts.Frequency + } + passed := diff % opts.Frequency + delay = time.Duration(opts.Frequency-passed) * time.Minute + } + + // if delayMins is 0, this will do nothing, so it does not hurt + time.Sleep(delay) + } + }(opts) + return c, nil +} + +// waitForCron given the current time and a cron string, calculate the Duration +// until the next time we will match the cron +func waitForCron(cronExpr string, from time.Time) (time.Duration, error) { + sched, err := cron.ParseStandard(cronExpr) + if err != nil { + return time.Duration(0), err + } + // sched.Next() returns the next time that the cron expression will match, beginning in 1ns; + // we allow matching current time, so we do it from 1ns + next := sched.Next(from.Add(-1 * time.Nanosecond)) + return next.Sub(from), nil +} diff --git a/pkg/core/timer_test.go b/pkg/core/timer_test.go new file mode 100644 index 00000000..f24db19b --- /dev/null +++ b/pkg/core/timer_test.go @@ -0,0 +1,38 @@ +package core + +import ( + "testing" + "time" +) + +func TestWaitForCron(t *testing.T) { + tests := []struct { + name string + cron string + from string + wait time.Duration + err error + }{ + {"current minute", "1 * * * *", "2018-10-10T10:01:00Z", 0, nil}, + {"next minute", "1 * * * *", "2018-10-10T10:00:00Z", 1 * time.Minute, nil}, + {"next day by hour", "* 1 * * *", "2018-10-10T10:00:00Z", 15 * time.Hour, nil}, + {"current minute but seconds in", "1 * * * *", "2018-10-10T10:01:10Z", 59*time.Minute + 50*time.Second, nil}, // this line tests that we use the current minute, and not wait for "-10" + {"midnight next day", "0 0 * * *", "2021-11-30T10:00:00Z", 14 * time.Hour, nil}, + {"first day next month in next year", "0 0 1 * *", "2020-12-30T10:00:00Z", 14*time.Hour + 24*time.Hour, nil}, // this line tests that we can handle rolling month correctly + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + from, err := time.Parse(time.RFC3339, tt.from) + if err != nil { + t.Fatalf("unable to parse from %s: %v", tt.from, err) + } + result, err := waitForCron(tt.cron, from) + switch { + case (err != nil && tt.err == nil) || (err == nil && tt.err != nil) || (err != nil && tt.err != nil && err.Error() != tt.err.Error()): + t.Errorf("waitForCron(%s, %s) error = %v, wantErr %v", tt.cron, tt.from, err, tt.err) + case result != tt.wait: + t.Errorf("waitForCron(%s, %s) = %v, want %v", tt.cron, tt.from, result, tt.wait) + } + }) + } +} diff --git a/pkg/database/connection.go b/pkg/database/connection.go new file mode 100644 index 00000000..ec41586b --- /dev/null +++ b/pkg/database/connection.go @@ -0,0 +1,23 @@ +package database + +import ( + "fmt" + + mysql "github.com/go-sql-driver/mysql" +) + +type Connection struct { + User string + Pass string + Host string + Port int +} + +func (c Connection) MySQL() string { + config := mysql.NewConfig() + config.User = c.User + config.Passwd = c.Pass + config.Net = "tcp" + config.Addr = fmt.Sprintf("%s:%d", c.Host, c.Port) + return config.FormatDSN() +} diff --git a/pkg/database/dump.go b/pkg/database/dump.go new file mode 100644 index 
00000000..52195272 --- /dev/null +++ b/pkg/database/dump.go @@ -0,0 +1,48 @@ +package database + +import ( + "database/sql" + "fmt" + + "github.com/databacker/mysql-backup/pkg/database/mysql" +) + +type DumpOpts struct { + Compact bool + SuppressUseDatabase bool + MaxAllowedPacket int +} + +func Dump(dbconn Connection, opts DumpOpts, writers []DumpWriter) error { + + // TODO: dump data for each writer: + // per schema + // mysqldump --databases ${onedb} $MYSQLDUMP_OPTS + // all at once + // mysqldump -A $MYSQLDUMP_OPTS + // all at once limited to some databases + // mysqldump --databases $DB_NAMES $MYSQLDUMP_OPTS + for _, writer := range writers { + db, err := sql.Open("mysql", dbconn.MySQL()) + if err != nil { + return fmt.Errorf("failed to open connection to database: %v", err) + } + defer db.Close() + for _, schema := range writer.Schemas { + dumper := &mysql.Data{ + Out: writer.Writer, + Connection: db, + Schema: schema, + Host: dbconn.Host, + Compact: opts.Compact, + SuppressUseDatabase: opts.SuppressUseDatabase, + MaxAllowedPacket: opts.MaxAllowedPacket, + } + if err := dumper.Dump(); err != nil { + return fmt.Errorf("failed to dump database %s: %v", schema, err) + } + } + } + + return nil +} diff --git a/pkg/database/dumpwriter.go b/pkg/database/dumpwriter.go new file mode 100644 index 00000000..a3f6edc5 --- /dev/null +++ b/pkg/database/dumpwriter.go @@ -0,0 +1,10 @@ +package database + +import ( + "io" +) + +type DumpWriter struct { + Schemas []string + Writer io.Writer +} diff --git a/pkg/database/mysql/dump.go b/pkg/database/mysql/dump.go new file mode 100644 index 00000000..6efebd99 --- /dev/null +++ b/pkg/database/mysql/dump.go @@ -0,0 +1,595 @@ +/* +* with thanks to https://github.com/BrandonRoehl/go-mysqldump which required some changes, +* but was under MIT. 
+* +* We might have been able to use it as is, except for this when running `go get`: + +go: finding module for package github.com/BrandonRoehl/go-mysqldump +go: found github.com/BrandonRoehl/go-mysqldump in github.com/BrandonRoehl/go-mysqldump v0.5.1 +go: github.com/databacker/mysql-backup/pkg/database imports + + github.com/BrandonRoehl/go-mysqldump: github.com/BrandonRoehl/go-mysqldump@v0.5.1: parsing go.mod: + module declares its path as: github.com/jamf/go-mysqldump + but was required as: github.com/BrandonRoehl/go-mysqldump +*/ +package mysql + +import ( + "bytes" + "context" + "database/sql" + "errors" + "fmt" + "io" + "reflect" + "strings" + "text/template" + "time" +) + +/* +Data struct to configure dump behavior + + Out: Stream to wite to + Connection: Database connection to dump + IgnoreTables: Mark sensitive tables to ignore + MaxAllowedPacket: Sets the largest packet size to use in backups + LockTables: Lock all tables for the duration of the dump +*/ +type Data struct { + Out io.Writer + Connection *sql.DB + IgnoreTables []string + MaxAllowedPacket int + LockTables bool + Schema string + Compact bool + Host string + SuppressUseDatabase bool + + tx *sql.Tx + headerTmpl *template.Template + tableTmpl *template.Template + footerTmpl *template.Template + err error +} + +type table struct { + Name string + Err error + + cols []string + data *Data + rows *sql.Rows + values []interface{} +} + +type metaData struct { + DumpVersion string + ServerVersion string + CompleteTime string + Host string + Database string +} + +const ( + // Version of this plugin for easy reference + Version = "0.6.0" + + defaultMaxAllowedPacket = 4194304 +) + +// takes a *metaData +const headerTmpl = `-- Go SQL Dump {{ .DumpVersion }} +-- +-- Host: {{.Host}} Database: {{.Database}} +-- ------------------------------------------------------ +-- Server version {{ .ServerVersion }} + +/*!40101 SET @OLD_CHARACTER_SET_CLIENT=@@CHARACTER_SET_CLIENT */; +/*!40101 SET @OLD_CHARACTER_SET_RESULTS=@@CHARACTER_SET_RESULTS */; +/*!40101 SET @OLD_COLLATION_CONNECTION=@@COLLATION_CONNECTION */; +/*!50503 SET NAMES utf8mb4 */; +/*!40103 SET @OLD_TIME_ZONE=@@TIME_ZONE */; +/*!40103 SET TIME_ZONE='+00:00' */; +/*!40014 SET @OLD_UNIQUE_CHECKS=@@UNIQUE_CHECKS, UNIQUE_CHECKS=0 */; +/*!40014 SET @OLD_FOREIGN_KEY_CHECKS=@@FOREIGN_KEY_CHECKS, FOREIGN_KEY_CHECKS=0 */; +/*!40101 SET @OLD_SQL_MODE=@@SQL_MODE, SQL_MODE='NO_AUTO_VALUE_ON_ZERO' */; +/*!40111 SET @OLD_SQL_NOTES=@@SQL_NOTES, SQL_NOTES=0 */; + +-- +-- Current Database: ` + "`{{.Database}}`" + ` +-- +` + +const createUseDatabaseHeader = ` +CREATE DATABASE /*!32312 IF NOT EXISTS*/ ` + "`{{.Database}}`" + ` /*!40100 DEFAULT CHARACTER SET utf8mb4 COLLATE utf8mb4_0900_ai_ci */ /*!80016 DEFAULT ENCRYPTION='N' */; + +USE ` + "`{{.Database}}`;" + +// takes a *metaData +const footerTmpl = `/*!40103 SET TIME_ZONE=@OLD_TIME_ZONE */; + +/*!40101 SET SQL_MODE=@OLD_SQL_MODE */; +/*!40014 SET FOREIGN_KEY_CHECKS=@OLD_FOREIGN_KEY_CHECKS */; +/*!40014 SET UNIQUE_CHECKS=@OLD_UNIQUE_CHECKS */; +/*!40101 SET CHARACTER_SET_CLIENT=@OLD_CHARACTER_SET_CLIENT */; +/*!40101 SET CHARACTER_SET_RESULTS=@OLD_CHARACTER_SET_RESULTS */; +/*!40101 SET COLLATION_CONNECTION=@OLD_COLLATION_CONNECTION */; +/*!40111 SET SQL_NOTES=@OLD_SQL_NOTES */; + +-- Dump completed on {{ .CompleteTime }}` + +const footerTmplCompact = `` + +// Takes a *table +const tableTmpl = ` +-- +-- Table structure for table {{ .NameEsc }} +-- + +DROP TABLE IF EXISTS {{ .NameEsc }}; +/*!40101 SET @saved_cs_client = @@character_set_client 
*/; +/*!50503 SET character_set_client = utf8mb4 */; +{{ .CreateSQL }}; +/*!40101 SET character_set_client = @saved_cs_client */; + +-- +-- Dumping data for table {{ .NameEsc }} +-- + +LOCK TABLES {{ .NameEsc }} WRITE; +/*!40000 ALTER TABLE {{ .NameEsc }} DISABLE KEYS */; +{{ range $value := .Stream }} +{{- $value }} +{{ end -}} +/*!40000 ALTER TABLE {{ .NameEsc }} ENABLE KEYS */; +UNLOCK TABLES; +` + +const tableTmplCompact = ` +/*!40101 SET @saved_cs_client = @@character_set_client */; +/*!50503 SET character_set_client = utf8mb4 */; +{{ .CreateSQL }}; +/*!40101 SET character_set_client = @saved_cs_client */; +{{ range $value := .Stream }} +{{- $value }} +{{ end -}} +` + +const nullType = "NULL" + +// Dump data using struct +func (data *Data) Dump() error { + meta := metaData{ + DumpVersion: Version, + Host: data.Host, + Database: data.Schema, + } + + if data.MaxAllowedPacket == 0 { + data.MaxAllowedPacket = defaultMaxAllowedPacket + } + + if err := data.getTemplates(); err != nil { + return err + } + + if err := data.selectSchema(); err != nil { + return err + } + + // Start the read only transaction and defer the rollback until the end + // This way the database will have the exact state it did at the begining of + // the backup and nothing can be accidentally committed + if err := data.begin(); err != nil { + return err + } + defer func() { + _ = data.rollback() + }() + + if err := meta.updateServerVersion(data); err != nil { + return err + } + + if err := data.headerTmpl.Execute(data.Out, meta); err != nil { + return err + } + + tables, err := data.getTables() + if err != nil { + return err + } + + // Lock all tables before dumping if present + if data.LockTables && len(tables) > 0 { + var b bytes.Buffer + b.WriteString("LOCK TABLES ") + for index, name := range tables { + if index != 0 { + b.WriteString(",") + } + b.WriteString("`" + name + "` READ /*!32311 LOCAL */") + } + + if _, err := data.Connection.Exec(b.String()); err != nil { + return err + } + + defer func() { + _, _ = data.Connection.Exec("UNLOCK TABLES") + }() + } + + for _, name := range tables { + if err := data.dumpTable(name); err != nil { + return err + } + } + if data.err != nil { + return data.err + } + + meta.CompleteTime = time.Now().UTC().Format("2006-01-02 15:04:05") + return data.footerTmpl.Execute(data.Out, meta) +} + +// MARK: - Private methods + +// selectSchema selects a specific schema to use +func (data *Data) selectSchema() error { + if data.Schema == "" { + return errors.New("cannot select schema when one is not provided") + } + _, err := data.Connection.Exec("USE `" + data.Schema + "`") + return err +} + +// begin starts a read only transaction that will be whatever the database was +// when it was called +func (data *Data) begin() (err error) { + data.tx, err = data.Connection.BeginTx(context.Background(), &sql.TxOptions{ + Isolation: sql.LevelRepeatableRead, + ReadOnly: true, + }) + return +} + +// rollback cancels the transaction +func (data *Data) rollback() error { + return data.tx.Rollback() +} + +// MARK: writter methods + +func (data *Data) dumpTable(name string) error { + if data.err != nil { + return data.err + } + table := data.createTable(name) + return data.writeTable(table) +} + +func (data *Data) writeTable(table *table) error { + if err := data.tableTmpl.Execute(data.Out, table); err != nil { + return err + } + return table.Err +} + +// MARK: get methods + +// getTemplates initializes the templates on data from the constants in this file +func (data *Data) getTemplates() (err error) { 
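+ // when Compact is set, the dump header and footer comment blocks are dropped (only the CREATE DATABASE/USE statement survives, if not suppressed) and the per-table template skips the DROP TABLE and LOCK TABLES statements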
+ var hTmpl string + fTmpl := footerTmpl + tTmpl := tableTmpl + if data.Compact { + fTmpl = footerTmplCompact + tTmpl = tableTmplCompact + } else { + hTmpl = headerTmpl + } + // do we include the `USE database;` in the dump? + if !data.SuppressUseDatabase { + hTmpl += createUseDatabaseHeader + // non-compact has an extra carriage return; no idea why + if !data.Compact { + hTmpl += "\n" + } + } + data.headerTmpl, err = template.New("mysqldumpHeader").Parse(hTmpl) + if err != nil { + return + } + + data.tableTmpl, err = template.New("mysqldumpTable").Parse(tTmpl) + if err != nil { + return + } + + data.footerTmpl, err = template.New("mysqldumpTable").Parse(fTmpl) + if err != nil { + return + } + return +} + +func (data *Data) getTables() ([]string, error) { + tables := make([]string, 0) + + rows, err := data.tx.Query("SHOW TABLES") + if err != nil { + return tables, err + } + defer rows.Close() + + for rows.Next() { + var table sql.NullString + if err := rows.Scan(&table); err != nil { + return tables, err + } + if table.Valid && !data.isIgnoredTable(table.String) { + tables = append(tables, table.String) + } + } + return tables, rows.Err() +} + +func (data *Data) isIgnoredTable(name string) bool { + for _, item := range data.IgnoreTables { + if item == name { + return true + } + } + return false +} + +func (meta *metaData) updateServerVersion(data *Data) (err error) { + var serverVersion sql.NullString + err = data.tx.QueryRow("SELECT version()").Scan(&serverVersion) + meta.ServerVersion = serverVersion.String + return +} + +// MARK: create methods + +func (data *Data) createTable(name string) *table { + return &table{ + Name: name, + data: data, + } +} + +func (table *table) NameEsc() string { + return "`" + table.Name + "`" +} + +func (table *table) CreateSQL() (string, error) { + var tableReturn, tableSQL sql.NullString + if err := table.data.tx.QueryRow("SHOW CREATE TABLE "+table.NameEsc()).Scan(&tableReturn, &tableSQL); err != nil { + return "", err + } + + if tableReturn.String != table.Name { + return "", errors.New("Returned table is not the same as requested table") + } + + return tableSQL.String, nil +} + +func (table *table) initColumnData() error { + colInfo, err := table.data.tx.Query("SHOW COLUMNS FROM " + table.NameEsc()) + if err != nil { + return err + } + defer colInfo.Close() + + cols, err := colInfo.Columns() + if err != nil { + return err + } + + fieldIndex, extraIndex := -1, -1 + for i, col := range cols { + switch col { + case "Field", "field": + fieldIndex = i + case "Extra", "extra": + extraIndex = i + } + if fieldIndex >= 0 && extraIndex >= 0 { + break + } + } + if fieldIndex < 0 || extraIndex < 0 { + return errors.New("database column information is malformed") + } + + info := make([]sql.NullString, len(cols)) + scans := make([]interface{}, len(cols)) + for i := range info { + scans[i] = &info[i] + } + + var result []string + for colInfo.Next() { + // Read into the pointers to the info marker + if err := colInfo.Scan(scans...); err != nil { + return err + } + + // Ignore the virtual columns + if !info[extraIndex].Valid || !strings.Contains(info[extraIndex].String, "VIRTUAL") { + result = append(result, info[fieldIndex].String) + } + } + table.cols = result + return nil +} + +func (table *table) columnsList() string { + return "`" + strings.Join(table.cols, "`, `") + "`" +} + +func (table *table) Init() error { + if len(table.values) != 0 { + return errors.New("can't init twice") + } + + if err := table.initColumnData(); err != nil { + return err + } + + if 
len(table.cols) == 0 { + // No data to dump since this is a virtual table + return nil + } + + var err error + table.rows, err = table.data.tx.Query("SELECT " + table.columnsList() + " FROM " + table.NameEsc()) + if err != nil { + return err + } + + tt, err := table.rows.ColumnTypes() + if err != nil { + return err + } + + table.values = make([]interface{}, len(tt)) + for i, tp := range tt { + table.values[i] = reflect.New(reflectColumnType(tp)).Interface() + } + return nil +} + +func reflectColumnType(tp *sql.ColumnType) reflect.Type { + // reflect for scanable + switch tp.ScanType().Kind() { + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + return reflect.TypeOf(sql.NullInt64{}) + case reflect.Float32, reflect.Float64: + return reflect.TypeOf(sql.NullFloat64{}) + case reflect.String: + return reflect.TypeOf(sql.NullString{}) + } + + // determine by name + switch tp.DatabaseTypeName() { + case "BLOB", "BINARY": + return reflect.TypeOf(sql.RawBytes{}) + case "VARCHAR", "TEXT", "DECIMAL": + return reflect.TypeOf(sql.NullString{}) + case "BIGINT", "TINYINT", "INT": + return reflect.TypeOf(sql.NullInt64{}) + case "DOUBLE": + return reflect.TypeOf(sql.NullFloat64{}) + } + + // unknown datatype + return tp.ScanType() +} + +func (table *table) Next() bool { + if table.rows == nil { + if err := table.Init(); err != nil { + table.Err = err + return false + } + } + // Fallthrough + if table.rows.Next() { + if err := table.rows.Scan(table.values...); err != nil { + table.Err = err + return false + } else if err := table.rows.Err(); err != nil { + table.Err = err + return false + } + } else { + table.rows.Close() + table.rows = nil + return false + } + return true +} + +func (table *table) RowValues() string { + return table.RowBuffer().String() +} + +func (table *table) RowBuffer() *bytes.Buffer { + var b bytes.Buffer + b.WriteString("(") + + for key, value := range table.values { + if key != 0 { + b.WriteString(",") + } + switch s := value.(type) { + case nil: + b.WriteString(nullType) + case *sql.NullString: + if s.Valid { + fmt.Fprintf(&b, "'%s'", sanitize(s.String)) + } else { + b.WriteString(nullType) + } + case *sql.NullInt64: + if s.Valid { + fmt.Fprintf(&b, "%d", s.Int64) + } else { + b.WriteString(nullType) + } + case *sql.NullFloat64: + if s.Valid { + fmt.Fprintf(&b, "%f", s.Float64) + } else { + b.WriteString(nullType) + } + case *sql.RawBytes: + if len(*s) == 0 { + b.WriteString(nullType) + } else { + fmt.Fprintf(&b, "_binary '%s'", sanitize(string(*s))) + } + default: + fmt.Fprintf(&b, "'%s'", value) + } + } + b.WriteString(")") + + return &b +} + +func (table *table) Stream() <-chan string { + valueOut := make(chan string, 1) + go func() { + defer close(valueOut) + var insert bytes.Buffer + + for table.Next() { + b := table.RowBuffer() + // Truncate our insert if it won't fit + if insert.Len() != 0 && insert.Len()+b.Len() > table.data.MaxAllowedPacket-1 { + _, _ = insert.WriteString(";") + valueOut <- insert.String() + insert.Reset() + } + + if insert.Len() == 0 { + _, _ = fmt.Fprint(&insert, "INSERT INTO ", table.NameEsc(), " VALUES ") + } else { + _, _ = insert.WriteString(",") + } + _, _ = b.WriteTo(&insert) + } + if insert.Len() != 0 { + _, _ = insert.WriteString(";") + valueOut <- insert.String() + } + }() + return valueOut +} diff --git a/pkg/database/mysql/sanitize.go b/pkg/database/mysql/sanitize.go new file mode 100644 index 00000000..448e1a59 --- /dev/null +++ b/pkg/database/mysql/sanitize.go @@ -0,0 +1,27 @@ +package mysql + +import "strings" + 
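+// note: the replacer below is initialized lazily on first use and is not guarded by a lock; concurrent first calls would race, though each would build an identical replacer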
+var lazyMySQLReplacer *strings.Replacer + +// sanitize MySQL based on +// https://dev.mysql.com/doc/refman/8.0/en/string-literals.html table 9.1 +// needs to be placed in either a single or a double quoted string +func sanitize(input string) string { + if lazyMySQLReplacer == nil { + lazyMySQLReplacer = strings.NewReplacer( + "\x00", "\\0", + "'", "\\'", + "\"", "\\\"", + "\b", "\\b", + "\n", "\\n", + "\r", "\\r", + // "\t", "\\t", Tab literals are acceptable in reads + "\x1A", "\\Z", // ASCII 26 == x1A + "\\", "\\\\", + // "%", "\\%", + // "_", "\\_", + ) + } + return lazyMySQLReplacer.Replace(input) +} diff --git a/pkg/database/restore.go b/pkg/database/restore.go new file mode 100644 index 00000000..39375756 --- /dev/null +++ b/pkg/database/restore.go @@ -0,0 +1,68 @@ +package database + +import ( + "bufio" + "context" + "database/sql" + "fmt" + "io" + "regexp" +) + +var ( + useRegex = regexp.MustCompile(`(?i)^(USE\s*` + "`" + `)([^\s]+)(` + "`" + `\s*;)$`) + createRegex = regexp.MustCompile(`(?i)^(CREATE\s+DATABASE\s*(\/\*.*\*\/\s*)?` + "`" + `)([^\s]+)(` + "`" + `\s*(\s*\/\*.*\*\/\s*)?\s*;$)`) +) + +func Restore(dbconn Connection, databasesMap map[string]string, readers []io.ReadSeeker) error { + db, err := sql.Open("mysql", dbconn.MySQL()) + if err != nil { + return fmt.Errorf("failed to open connection to database: %v", err) + } + defer db.Close() + + // load data into database by reading from each reader + ctx := context.Background() + for _, r := range readers { + tx, err := db.BeginTx(ctx, &sql.TxOptions{Isolation: sql.LevelSerializable}) + if err != nil { + return fmt.Errorf("failed to restore database: %w", err) + } + scanner := bufio.NewScanner(r) + var current string + for scanner.Scan() { + line := scanner.Text() + if line == "" { + continue + } + current += line + "\n" + if line[len(line)-1] != ';' { + continue + } + // if we have the line that sets the database, and we need to replace, replace it + if createRegex.MatchString(current) { + dbName := createRegex.FindStringSubmatch(current)[3] + if newName, ok := databasesMap[dbName]; ok { + current = createRegex.ReplaceAllString(current, fmt.Sprintf("${1}%s${4}", newName)) + } + } + if useRegex.MatchString(current) { + dbName := useRegex.FindStringSubmatch(current)[2] + if newName, ok := databasesMap[dbName]; ok { + current = useRegex.ReplaceAllString(current, fmt.Sprintf("${1}%s${3}", newName)) + } + } + // we hit a break, so we have the entire transaction + if _, err := tx.Exec(current); err != nil { + _ = tx.Rollback() + return fmt.Errorf("failed to restore database: %w", err) + } + current = "" + } + if err := tx.Commit(); err != nil { + return fmt.Errorf("failed to restore database: %w", err) + } + } + + return nil +} diff --git a/pkg/database/schemas.go b/pkg/database/schemas.go new file mode 100644 index 00000000..5dddf333 --- /dev/null +++ b/pkg/database/schemas.go @@ -0,0 +1,48 @@ +package database + +import ( + "database/sql" + "fmt" +) + +var ( + excludeSchemaList = []string{"information_schema", "performance_schema", "sys", "mysql"} + excludeSchemas = map[string]bool{} +) + +func init() { + for _, schema := range excludeSchemaList { + excludeSchemas[schema] = true + } +} + +func GetSchemas(dbconn Connection) ([]string, error) { + db, err := sql.Open("mysql", fmt.Sprintf("%s:%s@tcp(%s:%d)/", dbconn.User, dbconn.Pass, dbconn.Host, dbconn.Port)) + if err != nil { + return nil, fmt.Errorf("failed to open connection to database: %v", err) + } + defer db.Close() + + // TODO: get list of schemas + // mysql -h $DB_SERVER 
-P $DB_PORT $DBUSER $DBPASS -N -e 'show databases' + rows, err := db.Query("show databases") + if err != nil { + return nil, fmt.Errorf("could not get schemas: %v", err) + } + defer rows.Close() + + names := []string{} + for rows.Next() { + var name string + err := rows.Scan(&name) + if err != nil { + return nil, fmt.Errorf("error getting database name: %v", err) + } + if _, ok := excludeSchemas[name]; ok { + continue + } + names = append(names, name) + } + + return names, nil +} diff --git a/pkg/storage/credentials/creds.go b/pkg/storage/credentials/creds.go new file mode 100644 index 00000000..3062df92 --- /dev/null +++ b/pkg/storage/credentials/creds.go @@ -0,0 +1,12 @@ +package credentials + +type Creds struct { + SMBCredentials SMBCreds + AWSEndpoint string +} + +type SMBCreds struct { + Username string + Password string + Domain string +} diff --git a/pkg/storage/file/file.go b/pkg/storage/file/file.go new file mode 100644 index 00000000..15359420 --- /dev/null +++ b/pkg/storage/file/file.go @@ -0,0 +1,51 @@ +package file + +import ( + "io" + "net/url" + "os" + "path" + "path/filepath" +) + +type File struct { + url url.URL + path string +} + +func New(u url.URL) *File { + return &File{u, u.Path} +} + +func (f *File) Pull(source, target string) (int64, error) { + return copyFile(path.Join(f.path, source), target) +} + +func (f *File) Push(target, source string) (int64, error) { + return copyFile(source, filepath.Join(f.path, target)) +} + +func (f *File) Protocol() string { + return "file" +} + +func (f *File) URL() string { + return f.url.String() +} + +// copyFile copy a file from to as efficiently as possible +func copyFile(from, to string) (int64, error) { + src, err := os.Open(from) + if err != nil { + return 0, err + } + defer src.Close() + + dst, err := os.Create(to) + if err != nil { + return 0, err + } + defer dst.Close() + n, err := io.Copy(dst, src) + return n, err +} diff --git a/pkg/storage/parse.go b/pkg/storage/parse.go new file mode 100644 index 00000000..35f794ef --- /dev/null +++ b/pkg/storage/parse.go @@ -0,0 +1,47 @@ +package storage + +import ( + "fmt" + + "github.com/databacker/mysql-backup/pkg/storage/credentials" + "github.com/databacker/mysql-backup/pkg/storage/file" + "github.com/databacker/mysql-backup/pkg/storage/s3" + "github.com/databacker/mysql-backup/pkg/storage/smb" + "github.com/databacker/mysql-backup/pkg/util" +) + +func ParseURL(url string, creds credentials.Creds) (Storage, error) { + // parse the target URL + u, err := util.SmartParse(url) + if err != nil { + return nil, fmt.Errorf("invalid target url%v", err) + } + + // do the upload + var store Storage + switch u.Scheme { + case "file": + store = file.New(*u) + case "smb": + opts := []smb.Option{} + if creds.SMBCredentials.Domain != "" { + opts = append(opts, smb.WithDomain(creds.SMBCredentials.Domain)) + } + if creds.SMBCredentials.Username != "" { + opts = append(opts, smb.WithUsername(creds.SMBCredentials.Username)) + } + if creds.SMBCredentials.Password != "" { + opts = append(opts, smb.WithPassword(creds.SMBCredentials.Password)) + } + store = smb.New(*u, opts...) + case "s3": + opts := []s3.Option{} + if creds.AWSEndpoint != "" { + opts = append(opts, s3.WithEndpoint(creds.AWSEndpoint)) + } + store = s3.New(*u, opts...) 
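+ // region and access keys are left to the AWS SDK's default credential chain (environment variables, shared config, or an attached role)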
+ default: + return nil, fmt.Errorf("unknown url protocol: %s", u.Scheme) + } + return store, nil +} diff --git a/pkg/storage/s3/s3.go b/pkg/storage/s3/s3.go new file mode 100644 index 00000000..de405453 --- /dev/null +++ b/pkg/storage/s3/s3.go @@ -0,0 +1,176 @@ +package s3 + +import ( + "context" + "fmt" + "net/url" + "os" + "path" + + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/config" + "github.com/aws/aws-sdk-go-v2/feature/s3/manager" + "github.com/aws/aws-sdk-go-v2/service/s3" + log "github.com/sirupsen/logrus" +) + +type S3 struct { + url url.URL + // pathStyle option is not really used, but may be required + // at some point; see https://aws.amazon.com/blogs/aws/amazon-s3-path-deprecation-plan-the-rest-of-the-story/ + pathStyle bool + region string + endpoint string + accessKeyId string + secretAccessKey string +} + +type Option func(s *S3) + +func WithPathStyle() Option { + return func(s *S3) { + s.pathStyle = true + } +} +func WithRegion(region string) Option { + return func(s *S3) { + s.region = region + } +} +func WithEndpoint(endpoint string) Option { + return func(s *S3) { + s.endpoint = endpoint + } +} +func WithAccessKeyId(accessKeyId string) Option { + return func(s *S3) { + s.accessKeyId = accessKeyId + } +} +func WithSecretAccessKey(secretAccessKey string) Option { + return func(s *S3) { + s.secretAccessKey = secretAccessKey + } +} + +func New(u url.URL, opts ...Option) *S3 { + s := &S3{url: u} + for _, opt := range opts { + opt(s) + } + return s +} + +func (s *S3) Pull(source, target string) (int64, error) { + // TODO: need to find way to include cli opts and cli_s3_cp_opts + // old was: + // aws ${AWS_CLI_OPTS} s3 cp ${AWS_CLI_S3_CP_OPTS} "${DB_RESTORE_TARGET}" $TMPRESTORE + + bucket, path := s.url.Hostname(), path.Join(s.url.Path, source) + // The session the S3 Downloader will use + cfg, err := getConfig(s.endpoint) + if err != nil { + return 0, fmt.Errorf("failed to load AWS config: %v", err) + } + + client := s3.NewFromConfig(cfg) + + // Create a downloader with the session and default options + downloader := manager.NewDownloader(client) + + // Create a file to write the S3 Object contents to. + f, err := os.Create(target) + if err != nil { + return 0, fmt.Errorf("failed to create target restore file %q, %v", target, err) + } + defer f.Close() + + // Write the contents of S3 Object to the file + n, err := downloader.Download(context.TODO(), f, &s3.GetObjectInput{ + Bucket: aws.String(bucket), + Key: aws.String(path), + }) + if err != nil { + return 0, fmt.Errorf("failed to download file, %v", err) + } + return n, nil +} + +func (s *S3) Push(target, source string) (int64, error) { + // TODO: need to find way to include cli opts and cli_s3_cp_opts + // old was: + // aws ${AWS_CLI_OPTS} s3 cp ${AWS_CLI_S3_CP_OPTS} "${DB_RESTORE_TARGET}" $TMPRESTORE + + bucket, key := s.url.Hostname(), s.url.Path + // The session the S3 Downloader will use + cfg, err := getConfig(s.endpoint) + if err != nil { + return 0, fmt.Errorf("failed to load AWS config: %v", err) + } + + client := s3.NewFromConfig(cfg) + // Create an uploader with the session and default options + uploader := manager.NewUploader(client) + + // Create a file to write the S3 Object contents to. 
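+ // (more precisely: open the local source file for reading; the S3 object is the upload destination)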
+ f, err := os.Open(source) + if err != nil { + return 0, fmt.Errorf("failed to read input file %q, %v", source, err) + } + defer f.Close() + + // Write the contents of the file to the S3 object + _, err = uploader.Upload(context.TODO(), &s3.PutObjectInput{ + Bucket: aws.String(bucket), + Key: aws.String(path.Join(key, target)), + Body: f, + }) + if err != nil { + return 0, fmt.Errorf("failed to upload file, %v", err) + } + return 0, nil +} + +func (s *S3) Protocol() string { + return "s3" +} + +func (s *S3) URL() string { + return s.url.String() +} + +func getEndpoint(endpoint string) string { + // for some reason, the lookup gets flaky when the endpoint is 127.0.0.1 + // so you have to set it to localhost explicitly. + e := endpoint + u, err := url.Parse(endpoint) + if err == nil { + if u.Hostname() == "127.0.0.1" { + port := u.Port() + u.Host = "localhost" + if port != "" { + u.Host += ":" + port + } + e = u.String() + } + } + return e +} + +func getConfig(endpoint string) (aws.Config, error) { + cleanEndpoint := getEndpoint(endpoint) + opts := []func(*config.LoadOptions) error{ + config.WithEndpointResolverWithOptions( + aws.EndpointResolverWithOptionsFunc(func(service, region string, options ...interface{}) (aws.Endpoint, error) { + return aws.Endpoint{URL: cleanEndpoint}, nil + }), + ), + } + if log.IsLevelEnabled(log.TraceLevel) { + opts = append(opts, config.WithClientLogMode(aws.LogRequestWithBody|aws.LogResponse)) + } + return config.LoadDefaultConfig(context.TODO(), + opts..., + ) + +} diff --git a/pkg/storage/smb/smb.go b/pkg/storage/smb/smb.go new file mode 100644 index 00000000..f2dde505 --- /dev/null +++ b/pkg/storage/smb/smb.go @@ -0,0 +1,173 @@ +package smb + +import ( + "fmt" + "io" + "net" + "net/url" + "os" + "path/filepath" + "strings" + + "github.com/cloudsoda/go-smb2" +) + +const ( + defaultSMBPort = "445" +) + +type SMB struct { + url url.URL + domain string + username string + password string +} + +type Option func(s *SMB) + +func WithDomain(domain string) Option { + return func(s *SMB) { + s.domain = domain + } +} +func WithUsername(username string) Option { + return func(s *SMB) { + s.username = username + } +} +func WithPassword(password string) Option { + return func(s *SMB) { + s.password = password + } +} + +func New(u url.URL, opts ...Option) *SMB { + s := &SMB{url: u} + for _, opt := range opts { + opt(s) + } + return s +} + +func (s *SMB) Pull(source, target string) (int64, error) { + return s.command(false, s.url, source, target) +} + +func (s *SMB) Push(target, source string) (int64, error) { + return s.command(true, s.url, target, source) +} + +func (s *SMB) Protocol() string { + return "smb" +} + +func (s *SMB) URL() string { + return s.url.String() +} + +func (s *SMB) command(push bool, u url.URL, remoteFilename, filename string) (int64, error) { + var ( + username, password, domain string + ) + + hostname, port, path := u.Hostname(), u.Port(), u.Path + // set default port + if port == "" { + port = defaultSMBPort + } + host := fmt.Sprintf("%s:%s", hostname, port) + share, sharepath := parseSMBPath(path) + if s.username == "" && u.User != nil { + username = u.User.Username() + password, _ = u.User.Password() + } + + username, domain = parseSMBDomain(username) + + conn, err := net.Dial("tcp", host) + if err != nil { + return 0, err + } + defer conn.Close() + + d := &smb2.Dialer{ + Initiator: &smb2.NTLMInitiator{ + Domain: domain, + User: username, + Password: password, + }, + } + + smbConn, err := d.Dial(conn) + if err != nil { + return 0, err + } + 
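+ // from here we have an authenticated SMB session; make sure it is logged off, and the share unmounted, when we return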
defer func() { + _ = smbConn.Logoff() + }() + + fs, err := smbConn.Mount(share) + if err != nil { + return 0, err + } + defer func() { + _ = fs.Umount() + }() + + smbFilename := fmt.Sprintf("%s%c%s", sharepath, smb2.PathSeparator, filepath.Base(strings.ReplaceAll(remoteFilename, ":", "-"))) + + var ( + from io.ReadCloser + to io.WriteCloser + ) + if push { + from, err = os.Open(filename) + if err != nil { + return 0, err + } + defer from.Close() + to, err = fs.Create(smbFilename) + if err != nil { + return 0, err + } + defer to.Close() + } else { + to, err = os.Create(filename) + if err != nil { + return 0, err + } + defer to.Close() + from, err = fs.Open(smbFilename) + if err != nil { + return 0, err + } + defer from.Close() + } + return io.Copy(to, from) +} + +// parseSMBDomain parse a username to get an SMB domain +// nolint: unused +func parseSMBDomain(username string) (user, domain string) { + parts := strings.SplitN(username, ";", 2) + if len(parts) < 2 { + return username, "" + } + // if we reached this point, we have a username that has a domain in it + return parts[1], parts[0] +} + +// parseSMBPath parse an smb path into its constituent parts +func parseSMBPath(path string) (share, sharepath string) { + sep := "/" + parts := strings.Split(path, sep) + if len(parts) <= 1 { + return path, "" + } + // if the path started with a slash, it might have an empty string as the first element + if parts[0] == "" { + parts = parts[1:] + } + // ensure no leading / as it messes up SMB + return parts[0], strings.Join(parts[1:], sep) +} diff --git a/pkg/storage/storage.go b/pkg/storage/storage.go new file mode 100644 index 00000000..2d7cecf3 --- /dev/null +++ b/pkg/storage/storage.go @@ -0,0 +1,8 @@ +package storage + +type Storage interface { + Push(target, source string) (int64, error) + Pull(source, target string) (int64, error) + Protocol() string + URL() string +} diff --git a/pkg/util/namedreader.go b/pkg/util/namedreader.go new file mode 100644 index 00000000..e03ed24c --- /dev/null +++ b/pkg/util/namedreader.go @@ -0,0 +1,11 @@ +package util + +import ( + "io" +) + +type NamedReader struct { + Name string + io.ReaderAt + io.ReadSeeker +} diff --git a/pkg/util/parse.go b/pkg/util/parse.go new file mode 100644 index 00000000..7b7ba2b5 --- /dev/null +++ b/pkg/util/parse.go @@ -0,0 +1,15 @@ +package util + +import ( + "net/url" + "strings" +) + +// smartParse parse a url, but convert "/" into "file:///" +func SmartParse(raw string) (*url.URL, error) { + if strings.HasPrefix(raw, "/") { + raw = "file://" + raw + } + + return url.Parse(raw) +} diff --git a/sample-configs/config.yaml b/sample-configs/config.yaml new file mode 100644 index 00000000..a205965a --- /dev/null +++ b/sample-configs/config.yaml @@ -0,0 +1,75 @@ +# sample configuration file for entire local config, not using remote config service +# will be overridden by command-line arguments +type: config.databack.io +version: 1 + +# set logging level, one of: error,warning,info,debug,trace; default is info +logging: info + +# dump, or backup, configuration +dump: + include: # optional, otherwise will do all tables except system tables + - table1 + - table2 + exclude: # optional, otherwise will do all tables except system tables + - table3 + - table4 + safechars: true # defaults to false + no-database-name: false # remove the `USE ` statement from backup files, defaults to false + # schedule to dump, can use one of: cron, frequency, once. 
If frequency is set, begin will be checked + schedule: + once: true # run only once and exit; ignores all other scheduling. Defaults to false + cron: "0 10 * * *" + frequency: 1440 # in minutes + begin: +25 # What time to do the first dump. Must be in one of two formats: Absolute: HHMM, e.g. `2330` or `0415`; or Relative: +MM, i.e. how many minutes after starting the container, e.g. `+0` (immediate), `+10` (in 10 minutes), or `+90` in an hour and a half" + compression: gzip # defaults to gzip + compact: true # defaults to false + maxAllowedPacket: 4194304 # defaults to 4194304 + tmp-path: /var/tmp/workdir # defaults to system-defined + filename-pattern: db_backup_{{ .now }}.{{ .compression }} + scripts: + pre-backup: /path/to/prescripts/ + post-backup: /path/to/postscripts/ + # list of named targets to backup to, from the index below + targets: + - s3 + - file + - otherfile + - smbshare + +restore: + scripts: + pre-restore: /path/to/prescripts/ + post-restore: /path/to/postscripts/ + +# database configuration +database: + server: host + port: port + credentials: + username: user + password: password + +# targets. Each target is a location, as well as credentials and config, as needed +targets: + s3: + type: s3 + url: s3://bucket.us-west.amazonaws.com/databackup + region: us-west-1 + endpoint: https://s3.us-west-1.amazonaws.com + credentials: + access-key-id: access_key_id + secret-access-key: secret_access_key + file: + type: file + url: file:///tmp/databackup + otherfile: + type: file + url: /tmp/databackup + smbshare: + type: smb + url: smb://cifshost:2125/databackup + credentials: + domain: mydomain + username: user + password: password diff --git a/sample-configs/remote-config.yaml b/sample-configs/remote-config.yaml new file mode 100644 index 00000000..eed1fda1 --- /dev/null +++ b/sample-configs/remote-config.yaml @@ -0,0 +1,30 @@ +# sample configuration file for all config provided by remote service +type: config.databack.io +version: 1 + +# set logging level, one of: error,warning,info,debug,trace; default is info +logging: info + +# receives the config from the config service, so nothing else needed +config: + url: https://config.databack.io + # certificate for the server; unneeded if the server is using a certificate signed by a well-known CA + certificate: | + -----BEGIN CERTIFICATE----- + MIIBHjCBxaADAgECAgEBMAoGCCqGSM49BAMCMBcxFTATBgNVBAoTDERvY2tlciwg + SW5jLjAeFw0xMzA3MjUwMTEwMjRaFw0xNTA3MjUwMTEwMjRaMBcxFTATBgNVBAoT + DERvY2tlciwgSW5jLjBZMBMGByqGSM49AgEGCCqGSM49AwEHA0IABMolCWAO0iP7 + tkX/KLjQ9CKeOoHYynBgfFcd1ZGoxcefmIbWjHx29eWI3xlhbjS6ssSxhrw1Kuh5 + RrASfUCHD7SjAjAAMAoGCCqGSM49BAMCA0gAMEUCIQDRLQTSSeqjsxsb+q4exLSt + EM7f7/ymBzoUzbXU7wI9AgIgXCWaI++GkopGT8T2qV/3+NL0U+fYM0ZjSNSiwaK3 + +kA= + -----END CERTIFICATE----- + credentials: + # this is a sample key only + # DO NOT USE THIS KEY; GENERATE YOUR OWN! 
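+    # one way to generate an EC key in this PEM format (example command; the output path is illustrative):
+    #   openssl ecparam -name prime256v1 -genkey -noout -out private-key.pem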
+ private-key: | + -----BEGIN EC PRIVATE KEY----- + MHcCAQEEIEFRa42BSz1uuRxWBh60vePDrpkgtELJJMZtkJGlExuLoAoGCCqGSM49 + AwEHoUQDQgAEyiUJYA7SI/u2Rf8ouND0Ip46gdjKcGB8Vx3VkajFx5+YhtaMfHb1 + 5YjfGWFuNLqyxLGGvDUq6HlGsBJ9QIcPtA== + -----END EC PRIVATE KEY----- diff --git a/sample-configs/telemetry-only.yaml b/sample-configs/telemetry-only.yaml new file mode 100644 index 00000000..1d587fe0 --- /dev/null +++ b/sample-configs/telemetry-only.yaml @@ -0,0 +1,101 @@ +# sample configuration file for telemetry service only; everything else is local +# will be overridden by command-line arguments +# or remote, if configured + +# only needed if registered to send logs and results to a telemetry service +# and not defined in the config service. Normally, you can just use the config +# to get the telemetry info +type: config.databack.io +version: 1 + +# set logging level, one of: error,warning,info,debug,trace; default is info +logging: info + +telemetry: + url: https://telemetry.databack.io + # only needed if required by endpoint + certificate: | + -----BEGIN CERTIFICATE----- + MIIBHjCBxaADAgECAgEBMAoGCCqGSM49BAMCMBcxFTATBgNVBAoTDERvY2tlciwg + SW5jLjAeFw0xMzA3MjUwMTEwMjRaFw0xNTA3MjUwMTEwMjRaMBcxFTATBgNVBAoT + DERvY2tlciwgSW5jLjBZMBMGByqGSM49AgEGCCqGSM49AwEHA0IABMolCWAO0iP7 + tkX/KLjQ9CKeOoHYynBgfFcd1ZGoxcefmIbWjHx29eWI3xlhbjS6ssSxhrw1Kuh5 + RrASfUCHD7SjAjAAMAoGCCqGSM49BAMCA0gAMEUCIQDRLQTSSeqjsxsb+q4exLSt + EM7f7/ymBzoUzbXU7wI9AgIgXCWaI++GkopGT8T2qV/3+NL0U+fYM0ZjSNSiwaK3 + +kA= + -----END CERTIFICATE----- + credentials: + # this is a sample key only + # DO NOT USE THIS KEY; GENERATE YOUR OWN! + private-key: | + -----BEGIN EC PRIVATE KEY----- + MHcCAQEEIEFRa42BSz1uuRxWBh60vePDrpkgtELJJMZtkJGlExuLoAoGCCqGSM49 + AwEHoUQDQgAEyiUJYA7SI/u2Rf8ouND0Ip46gdjKcGB8Vx3VkajFx5+YhtaMfHb1 + 5YjfGWFuNLqyxLGGvDUq6HlGsBJ9QIcPtA== + -----END EC PRIVATE KEY----- + +# dump, or backup, configuration +dump: + include: # optional, otherwise will do all tables except system tables + - table1 + - table2 + exclude: # optional, otherwise will do all tables except system tables + - table3 + - table4 + safechars: true # defaults to false + no-database-name: false # remove the `USE ` statement from backup files, defaults to false + # schedule to dump, can use one of: cron, frequency, once. If frequency is set, begin will be checked + schedule: + once: true # run only once and exit; ignores all other scheduling. Defaults to false + cron: "0 10 * * *" + frequency: 1440 # in minutes + begin: 25 # minutes from initialization + compression: gzip # defaults to gzip + tmp-path: /var/tmp/workdir # defaults to system-defined + filename-pattern: db_backup_{{ .now }}.{{ .compression }} + scripts: + pre-backup: /path/to/prescripts/ + post-backup: /path/to/postscripts/ + # list of named targets to backup to, from the index below + targets: + - s3 + - file + - otherfile + - smbshare + +restore: + scripts: + pre-restore: /path/to/prescripts/ + post-restore: /path/to/postscripts/ + +# database configuration +database: + server: host + port: port + credentials: + username: user + password: password + +# targets. 
Each target is a location, as well as credentials and config, as needed +targets: + s3: + type: s3 + url: s3://bucket.us-west.amazonaws.com/databackup + region: us-west-1 + endpoint: https://s3.us-west-1.amazonaws.com + credentials: + access-key-id: access_key_id + secret-access-key: secret_access_key + file: + type: file + url: file:///tmp/databackup + otherfile: + type: file + url: /tmp/databackup + smbshare: + type: smb + url: smb://cifshost:2125/databackup + credentials: + domain: mydomain + username: user + password: password diff --git a/test/Dockerfile b/test/Dockerfile new file mode 100644 index 00000000..aa3a555d --- /dev/null +++ b/test/Dockerfile @@ -0,0 +1,31 @@ +FROM mysql:8.0 + +## MYSQL + + +FROM alpine:3.17 + +## SAMBA + +# smb port +EXPOSE 445 + +# install the necessary client +RUN apk add --update bash samba-server && rm -rf /var/cache/apk/* && touch /etc/samba/smb.conf + +# enter smb.conf +COPY smb.conf /etc/samba/ +COPY smbusers /etc/samba/ +COPY *.tdb /var/lib/samba/private/ +# create a user with no home directory but the right password +RUN adduser user -D -H +RUN echo user:pass | chpasswd + +### s3 +RUN apk add --update minio + +# start samba +#CMD /usr/sbin/smbd -F --debug-stdout -d 4 --no-process-group + +# start minio +#RUN minio server /path/to/s3 diff --git a/test/Dockerfile_test b/test/Dockerfile_test deleted file mode 100644 index 3dcc6f68..00000000 --- a/test/Dockerfile_test +++ /dev/null @@ -1,19 +0,0 @@ -# mysql backup image -ARG BASE=mysqlbackup_backup_test -FROM ${BASE} -MAINTAINER Avi Deitcher - -# set us up to run as non-root user -# user/group 'appuser' are created in the base -USER root - -RUN mkdir -p /backups && chown appuser:appuser /backups - -USER appuser - -COPY entrypoint_test.sh /entrypoint -COPY cron_test.sh /cron_test.sh - -ENTRYPOINT ["/entrypoint"] - - diff --git a/test/README.md b/test/README.md new file mode 100644 index 00000000..76970114 --- /dev/null +++ b/test/README.md @@ -0,0 +1,73 @@ +# Integration Tests + +This folder contains integration tests. They are executed only if the go tag `integration` is set, e.g. + +```bash +go test -tags=integration +``` + +As part of the process, it starts mysql, smb and s3 containers, and then runs the tests against them. +When it is done, it tears them down. + +If you wish to keep the containers, for example, for inspection, then run it with the `keepcontainers` tag, e.g. + +```bash +go test -tags=integration,keepcontainers +``` + +If you wish to see the logs from the various containers - smb, s3, mysql - before they are torn down, then run it +with the `logs` tag, e.g. + +```bash +go test -tags=integration,logs +``` + +## How it works + +There are three containers started: + +* mysql +* smb +* s3 + +These are all started using the golang docker API. Each of these has their port exposed to the host machine. +The startup process lets docker pick the port, and then finds it. + +At that point, each test in the list of tests is run bu invoking `mysql-backup` directly on the host machine, +pointing it at the various targets. `mysql-backup` is **not** invoked as a subprocess, but rather as a library call. +This does leave the possibility of a bug in how the CLI calls the library, but we accept that risk as reasonable. + +Because the SMB and S3 containers save to local directories, the place to check the results needs to be mounted into +the containers. + +On startup, the test creates a temporary working directory, henceforth called `base`. 
All files are saved to somewhere +inside base, whether as a file target for backups with target of file://dir or /dir, or for an S3 or SMB target inside +their respective containers, or for storing pre/post backup/restore scripts. + +The structure of the base directory is as follows. Keep in mind that we have one single SMB and S3 container each, so the +directory is shared among different backups. That means we need to distinguish among targets that we pass to the +containers. Further, they might run in parallel, so it is important that the different activities do not trounce each other. + +We resolve this by having each backup target get its own directory under `base/backups/`. The name of the directory +cannot be based just on the target, as that might be reused. We also try to avoid sequence numbers, as they are not very +helpful. Instead, each target gets a random directory name. This is then appended to the target. + +Here are some examples, assuming that the base is `/tmp/mysql-backup-test-abcd123` and the random generated number +is `115647`: + +* `file://dir` -> `/tmp/mysql-backup-test-abcd123/backups/dir/115647` +* `s3://s3/bucket1` -> `s3://s3/bucket1/115647` ; which, since `/tmp/mysql-backup-test-abcd123/` is mounted to the + container, becomes `/tmp/mysql-backup-test-abcd123/backups/s3/bucket1/115647` +* `smb://smb/path2` -> `smb://smb/path2/115647` ; which, since `/tmp/mysql-backup-test-abcd123/` is mounted to the + container, becomes `/tmp/mysql-backup-test-abcd123/backups/smb/path2/115647` + +In order to keep it simple, we have the test target be the basic, e.g. `smb://smb/noauth` or `/backups`, and then we +add the rest of the path to the caller before passing it on to `mysql-backup`. + +Structure of base is: + +base/ - base of the backup area + backup.sql - the backup we take manually at the beginning, for comparison + backups/ - the directory where backups are stored + 15674832/ - one target's backup + 88725436/ - another target's backup diff --git a/test/_functions.sh b/test/_functions.sh deleted file mode 100644 index c994c408..00000000 --- a/test/_functions.sh +++ /dev/null @@ -1,239 +0,0 @@ -#!/bin/bash -set -e - -DEBUG=${DEBUG:-0} -[[ -n "$DEBUG" && "$DEBUG" == "verbose" ]] && DEBUG=1 -[[ -n "$DEBUG" && "$DEBUG" == "debug" ]] && DEBUG=2 - -[[ "$DEBUG" == "2" ]] && set -x - -BACKUP_IMAGE=mysqlbackup_backup_test:latest -BACKUP_TESTER_IMAGE=mysqlbackup_backup_test_harness:latest -SMB_IMAGE=mysqlbackup_smb_test:latest -BACKUP_VOL=mysqlbackup-test -CERTS_VOL=mysqlbackup-certs -MYSQLDUMP_OPTS="--ssl-cert /certs/client-cert.pem --ssl-key /certs/client-key.pem" -RESTORE_OPTS="--ssl-cert /certs/client-cert.pem --ssl-key /certs/client-key.pem" -MYSQLUSER=user -MYSQLPW=abcdefg -MYSQL_IMAGE=mysql:8.0 -arch=$(uname -m) -if [ "$arch" = "arm64" -o "$arch" = "aarch64" ]; then - MYSQL_IMAGE=${MYSQL_IMAGE}-oracle -fi - -QUIET="-q" -[[ "$DEBUG" != "0" ]] && QUIET="" - -smb_cid= -mysql_cid= -s3_cid= - -# create a tmp backupfile -function create_backup_file() { - local target=/tmp/backup.$$.tgz - echo 'use tester; create table t1 (id INT, name VARCHAR(20)); INSERT INTO t1 (id,name) VALUES (1, "John"), (2, "Jill"), (3, "Sam"), (4, "Sarah");' | $db_connect - tmpdumpdir=/tmp/backup_holder.$$ - rm -rf $tmpdumpdir - mkdir $tmpdumpdir - tmpdumpfile=backup.sql - docker exec $mysql_cid mysqldump -hlocalhost --protocol=tcp -u$MYSQLUSER -p$MYSQLPW --compact --databases tester > $tmpdumpdir/$tmpdumpfile - tar -C $tmpdumpdir -cvf - $tmpdumpfile | gzip > ${target} - cat $target | docker run --label 
mysqltest --name mysqlbackup-data-source -i --rm -v ${BACKUP_VOL}:/backups -v ${CERTS_VOL}:/certs -e DEBUG=${DEBUG} -e MYSQLDUMP_OPTS="${MYSQLDUMP_OPTS}" ${BACKUP_TESTER_IMAGE} save_dump - rm -rf $tmpdumpdir $target -} - -# Configure backup directory -function configure_backup_directory_target() { - local t=$1 - local seqno=$2 - # where will we store - # create the backups directory - # clear the target - # replace SEQ if needed - t2=${t/SEQ/${seqno}} - mkdir -p ${BACKUP_DIRECTORY_BASE}/${seqno}/data - chmod -R 0777 ${BACKUP_DIRECTORY_BASE}/${seqno} - echo "target: ${t2}" >> ${BACKUP_DIRECTORY_BASE}/${seqno}/list - - # are we working with nopath? - if [[ "$t2" =~ nopath ]]; then - rm -f ${BACKUP_DIRECTORY_BASE}/nopath - ln -s ${seqno}/data ${BACKUP_DIRECTORY_BASE}/nopath - fi - - echo ${t2} -} - -function get_default_source() { - echo "db_backup_*.tgz" -} - -function make_test_images() { - [[ "$DEBUG" != "0" ]] && echo "Creating backup image" - - docker build $QUIET -t ${BACKUP_IMAGE} -f ../Dockerfile ../ - docker build $QUIET -t ${BACKUP_TESTER_IMAGE} -f Dockerfile_test --build-arg BASE=${BACKUP_IMAGE} ctr/ -} - -function rm_containers() { - local cids=$@ - [[ "$DEBUG" != "0" ]] && echo "Removing backup containers" - - # stop and remove each container - [[ "$DEBUG" != "0" ]] && echo "Stopping and removing ${cids}" - for i in ${cids}; do - CMD1="docker kill ${i}" - CMD2="docker rm ${i}" - if [[ "$DEBUG" == "0" ]]; then - $CMD1 > /dev/null 2>&1 - $CMD2 > /dev/null 2>&1 - else - # keep the logs - docker logs $i - $CMD1 - $CMD2 - fi - done -} - -function makenetwork() { - # create the network we need - [[ "$DEBUG" != "0" ]] && echo "Creating the test network" - # make sure no old one still is there - local EXISTING_NETS=$(docker network ls --filter label=mysqltest -q) - [ -n "${EXISTING_NETS}" ] && docker network rm ${EXISTING_NETS} - docker network create mysqltest --label mysqltest -} -function makevolume() { - # make sure no previous one exists - local EXISTING_VOLS=$(docker volume ls --filter label=mysqltest -q) - [ -n "${EXISTING_VOLS}" ] && docker volume rm ${EXISTING_VOLS} - docker volume create --label mysqltest $BACKUP_VOL - docker volume create --label mysqltest $CERTS_VOL -} -function makesmb() { - # build the service images we need - [[ "$DEBUG" != "0" ]] && echo "Creating smb image" - docker build $QUIET -t ${SMB_IMAGE} -f ./Dockerfile_smb ctr/ -} -function start_service_containers() { - # run the test images we need - [[ "$DEBUG" != "0" ]] && echo "Running smb, s3 and mysql containers" - smb_cid=$(docker run --label mysqltest --net mysqltest --name=smb -d -p 445:445 -v ${BACKUP_VOL}:/share/backups -t ${SMB_IMAGE}) - mysql_cid=$(docker run --label mysqltest --net mysqltest --name mysql -d -v ${CERTS_VOL}:/certs -e MYSQL_ROOT_PASSWORD=root -e MYSQL_DATABASE=tester -e MYSQL_USER=$MYSQLUSER -e MYSQL_PASSWORD=$MYSQLPW $MYSQL_IMAGE --require-secure-transport) - docker exec -i mysql bash -c "until [[ -f /var/lib/mysql/client-cert.pem ]]; do sleep 5; done; cp /var/lib/mysql/client-cert.pem /certs" - docker exec -i mysql bash -c "until [[ -f /var/lib/mysql/client-key.pem ]]; do sleep 5; done; cp /var/lib/mysql/client-key.pem /certs" - # need process privilege, set it up after waiting for the mysql to be ready - s3_cid=$(docker run --label mysqltest --net mysqltest --name s3 -d -v ${CERTS_VOL}:/certs -v ${BACKUP_VOL}:/fakes3_root/s3/mybucket lphoward/fake-s3 -r /fakes3_root -p 443) - # Allow up to 20 seconds for the database to be ready - db_connect="docker exec -i $mysql_cid mysql 
${MYSQLDUMP_OPTS} -u$MYSQLUSER -p$MYSQLPW --protocol=tcp -h127.0.0.1 --wait --connect_timeout=20 tester" - retry_count=0 - retryMax=20 - retrySleep=1 - until [[ $retry_count -ge $retryMax ]]; do - set +e - $db_connect -e 'select 1;' - success=$? - set -e - [[ $success == 0 ]] && break - ((retry_count ++)) || true - sleep $retrySleep - done - # did we succeed? - if [[ $success != 0 ]]; then - echo -n "failed to connect to database after $retryMax tries." >&2 - return 1 - fi - # ensure the user has the right privileges - docker exec -i mysql mysql ${MYSQLDUMP_OPTS} -uroot -proot --protocol=tcp -h127.0.0.1 -e "grant process on *.* to user;" -} -function rm_service_containers() { - local smb_cid="$1" - local mysql_cid="$2" - local s3_cid="$3" - if [[ "$DEBUG" == "2" ]]; then - echo - echo "SMB LOGS:" - docker logs $smb_cid - echo - echo "MYSQL LOGS:" - docker logs $mysql_cid - echo - echo "S3 LOGS:" - docker logs $s3_cid - fi - - [[ "$DEBUG" != "0" ]] && echo "Stopping and removing smb, mysql and s3 containers" - local CMD1="docker kill $smb_cid $mysql_cid $s3_cid" - local CMD2="docker rm $smb_cid $mysql_cid $s3_cid" - if [[ "$DEBUG" == "0" ]]; then - $CMD1 > /dev/null 2>&1 - $CMD2 > /dev/null 2>&1 - else - $CMD1 - $CMD2 - fi -} -function rm_network() { - [[ "$DEBUG" != "0" ]] && echo "Removing docker network" - docker network rm mysqltest -} -function rm_volume() { - [[ "$DEBUG" != "0" ]] && echo "Removing docker volume" - docker volume rm ${BACKUP_VOL} - docker volume rm ${CERTS_VOL} -} -function run_dump_test() { - local t=$1 - local sequence=$2 - local subseq=0 - local allTargets= - # we might have multiple targets - for target in $t ; do - seqno="${sequence}-${subseq}" - # where will we store - # create the backups directory - # clear the target - # replace SEQ if needed - t2=${target/SEQ/${seqno}} - allTargets="${allTargets} ${t2}" - - ((subseq++)) || true - done - - # if in DEBUG, make sure backup also runs in DEBUG - if [[ "$DEBUG" != "0" ]]; then - DBDEBUG="-e DB_DUMP_DEBUG=2" - else - DBDEBUG= - fi - - # change our target - # ensure that we remove leading whitespace from targets - allTargets=$(echo $allTargets | awk '{$1=$1;print}') - if [[ "$sequence" -lt 1 ]]; then - # on first run we need to fix /certs/*.pem permissions and assign it to appuser otherwise mysqldump command fails - c_with_wrong_permission=$(docker container create --label mysqltest --name mysqltest-fix-certs-permissions -v ${BACKUP_VOL}:/backups -v ${CERTS_VOL}:/certs ${DBDEBUG} -e DB_USER=$MYSQLUSER -e DB_PASS=$MYSQLPW -e DB_DUMP_FREQ=60 -e DB_DUMP_BEGIN=+0 -e DB_DUMP_TARGET="${allTargets}" -e DB_SERVER=mysql -e MYSQLDUMP_OPTS="--compact ${MYSQLDUMP_OPTS}" ${BACKUP_IMAGE}) - docker container start ${c_with_wrong_permission} >/dev/null - docker exec -u 0 ${c_with_wrong_permission} chown -R appuser /certs>/dev/null - rm_containers $c_with_wrong_permission - fi - cid=$(docker container create --label mysqltest --name mysqlbackup-${sequence} --net mysqltest -v ${BACKUP_VOL}:/backups -v ${CERTS_VOL}:/certs --link ${s3_cid}:mybucket.s3.amazonaws.com ${DBDEBUG} -e DB_USER=$MYSQLUSER -e DB_PASS=$MYSQLPW -e DB_DUMP_FREQ=60 -e DB_DUMP_BEGIN=+0 -e DB_DUMP_TARGET="${allTargets}" -e AWS_ACCESS_KEY_ID=abcdefg -e AWS_SECRET_ACCESS_KEY=1234567 -e AWS_ENDPOINT_URL=http://s3:443/ -e DB_SERVER=mysql -e MYSQLDUMP_OPTS="--compact ${MYSQLDUMP_OPTS}" ${BACKUP_IMAGE}) - linkfile=/tmp/link.$$ - ln -s /backups/$sequence ${linkfile} - docker cp ${linkfile} $cid:/scripts.d - rm ${linkfile} - docker container start ${cid} >/dev/null - echo 
$cid -} - -function sleepwait() { - local waittime=$1 - [[ "$DEBUG" != "0" ]] && echo "Waiting ${waittime} seconds to complete backup runs" - os=$(uname -s | tr [A-Z] [a-z]) - if [ "$os" = "linux" ]; then - waittime="${waittime}s" - fi - sleep ${waittime} -} diff --git a/test/backup_log_containers.go b/test/backup_log_containers.go new file mode 100644 index 00000000..930fb0b2 --- /dev/null +++ b/test/backup_log_containers.go @@ -0,0 +1,7 @@ +//go:build integration && !logs + +package test + +func logContainers(dc *dockerContext, cids ...string) error { + return nil +} diff --git a/test/backup_nolog_containers.go b/test/backup_nolog_containers.go new file mode 100644 index 00000000..e21f5c30 --- /dev/null +++ b/test/backup_nolog_containers.go @@ -0,0 +1,7 @@ +//go:build integration && logs + +package test + +func logContainers(dc *dockerContext, cids ...string) error { + return dc.logContainers(cids...) +} diff --git a/test/backup_teardown_test.go b/test/backup_teardown_test.go new file mode 100644 index 00000000..0d675f66 --- /dev/null +++ b/test/backup_teardown_test.go @@ -0,0 +1,12 @@ +//go:build integration + +package test + +import "fmt" + +func teardown(dc *dockerContext, cids ...string) error { + if err := dc.rmContainers(cids...); err != nil { + return fmt.Errorf("failed to remove containers: %v", err) + } + return nil +} diff --git a/test/backup_test.go b/test/backup_test.go new file mode 100644 index 00000000..3fc6d132 --- /dev/null +++ b/test/backup_test.go @@ -0,0 +1,856 @@ +//go:build integration + +package test + +import ( + "archive/tar" + "bufio" + "bytes" + "compress/gzip" + "context" + "errors" + "fmt" + "io" + "math/rand" + "net/http/httptest" + "net/url" + "os" + "path/filepath" + "regexp" + "strconv" + "strings" + "syscall" + "testing" + "time" + + "github.com/databacker/mysql-backup/pkg/compression" + "github.com/databacker/mysql-backup/pkg/core" + "github.com/databacker/mysql-backup/pkg/database" + "github.com/databacker/mysql-backup/pkg/storage" + "github.com/databacker/mysql-backup/pkg/storage/credentials" + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/container" + "github.com/docker/docker/client" + "github.com/docker/docker/pkg/stdcopy" + "github.com/docker/go-connections/nat" + "github.com/johannesboyne/gofakes3" + "github.com/johannesboyne/gofakes3/backend/s3mem" + "github.com/moby/moby/pkg/archive" + log "github.com/sirupsen/logrus" + "github.com/stretchr/testify/assert" +) + +const ( + mysqlUser = "user" + mysqlPass = "abcdefg" + mysqlRootUser = "root" + mysqlRootPass = "root" + smbImage = "mysqlbackup_smb_test:latest" + mysqlImage = "mysql:8.0" + bucketName = "mybucket" +) + +// original filters also filtered out anything that started with "/\*![\d]{5}.\*/;$", +// i.e. in a comment, ending in ;, and a 5 digit number in the comment at the beginning +// after a ! 
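+// (for example, mysqldump emits version-conditional lines such as `/*!40101 SET NAMES utf8 */;`)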
+// not sure we want to filter those out +var dumpFilterRegex = []*regexp.Regexp{ + //regexp.MustCompile("^.*SET character_set_client.*|s/^\/\*![0-9]\{5\}.*\/;$"), + regexp.MustCompile(`(?i)^\s*-- MySQL dump .*$`), + regexp.MustCompile(`(?i)^\s*-- Go SQL dump .*$`), + regexp.MustCompile(`(?i)^\s*-- Dump completed on .*`), +} + +type containerPort struct { + name string + id string + port int +} +type dockerContext struct { + cli *client.Client +} + +type backupTarget struct { + s string + id string + subid string + localPath string +} + +func (t backupTarget) String() string { + return t.s +} +func (t backupTarget) WithPrefix(prefix string) string { + // prepend the prefix to the path, but only to the path + // and only if it is file scheme + scheme := t.Scheme() + if scheme != "file" && scheme != "" { + return t.s + } + u, err := url.Parse(t.s) + if err != nil { + return "" + } + u.Path = filepath.Join(prefix, u.Path) + return u.String() +} + +func (t backupTarget) Scheme() string { + u, err := url.Parse(t.s) + if err != nil { + return "" + } + return u.Scheme +} +func (t backupTarget) Host() string { + u, err := url.Parse(t.s) + if err != nil { + return "" + } + return u.Host +} +func (t backupTarget) Path() string { + u, err := url.Parse(t.s) + if err != nil { + return "" + } + return u.Path +} + +// uniquely generated ID of the target. Shared across multiple targets that are part of the same +// backup set, e.g. "file:///backups/ smb://smb/path", where each sub has its own subid +func (t backupTarget) ID() string { + return t.id +} +func (t backupTarget) SubID() string { + return t.subid +} + +func (t backupTarget) LocalPath() string { + return t.localPath +} + +// getDockerContext retrieves a Docker context with a prepared client handle +func getDockerContext() (*dockerContext, error) { + cli, err := client.NewClientWithOpts(client.FromEnv, client.WithAPIVersionNegotiation()) + if err != nil { + return nil, err + } + return &dockerContext{cli}, nil +} + +func (d *dockerContext) execInContainer(ctx context.Context, cid string, cmd []string) (types.HijackedResponse, int, error) { + execConfig := types.ExecConfig{ + AttachStdout: true, + AttachStderr: true, + Cmd: cmd, + } + execResp, err := d.cli.ContainerExecCreate(ctx, cid, execConfig) + if err != nil { + return types.HijackedResponse{}, 0, fmt.Errorf("failed to create exec: %w", err) + } + var execStartCheck types.ExecStartCheck + attachResp, err := d.cli.ContainerExecAttach(ctx, execResp.ID, execStartCheck) + if err != nil { + return attachResp, 0, fmt.Errorf("failed to attach to exec: %w", err) + } + var ( + retryMax = 20 + retrySleep = 1 + success bool + inspect types.ContainerExecInspect + ) + for i := 0; i < retryMax; i++ { + inspect, err = d.cli.ContainerExecInspect(ctx, execResp.ID) + if err != nil { + return attachResp, 0, fmt.Errorf("failed to inspect exec: %w", err) + } + if !inspect.Running { + success = true + break + } + time.Sleep(time.Duration(retrySleep) * time.Second) + } + if !success { + return attachResp, 0, fmt.Errorf("failed to wait for exec to finish") + } + return attachResp, inspect.ExitCode, nil +} +func (d *dockerContext) waitForDBConnectionAndGrantPrivileges(mysqlCID, dbuser, dbpass string) error { + ctx := context.Background() + + // Allow up to 20 seconds for the mysql database to be ready + retryMax := 20 + retrySleep := 1 + success := false + + for i := 0; i < retryMax; i++ { + // Check database connectivity + dbValidate := []string{"mysql", fmt.Sprintf("-u%s", dbuser), fmt.Sprintf("-p%s", dbpass), 
"--protocol=tcp", "-h127.0.0.1", "--wait", "--connect_timeout=20", "tester", "-e", "select 1;"} + attachResp, exitCode, err := d.execInContainer(ctx, mysqlCID, dbValidate) + if err != nil { + return fmt.Errorf("failed to attach to exec: %w", err) + } + defer attachResp.Close() + if exitCode == 0 { + success = true + break + } + + time.Sleep(time.Duration(retrySleep) * time.Second) + } + + if !success { + return fmt.Errorf("failed to connect to database after %d tries", retryMax) + } + + // Ensure the user has the right privileges + dbGrant := []string{"mysql", fmt.Sprintf("-u%s", dbpass), fmt.Sprintf("-p%s", dbpass), "--protocol=tcp", "-h127.0.0.1", "-e", "grant process on *.* to user;"} + attachResp, exitCode, err := d.execInContainer(ctx, mysqlCID, dbGrant) + if err != nil { + return fmt.Errorf("failed to attach to exec: %w", err) + } + defer attachResp.Close() + var bufo, bufe bytes.Buffer + _, _ = stdcopy.StdCopy(&bufo, &bufe, attachResp.Reader) + if exitCode != 0 { + return fmt.Errorf("failed to grant privileges to user: %s", bufe.String()) + } + + return nil +} + +func (d *dockerContext) startSMBContainer(image, name, base string) (cid string, port int, err error) { + return d.startContainer(image, name, "445/tcp", []string{fmt.Sprintf("%s:/share/backups", base)}, nil, nil) +} + +func (d *dockerContext) startContainer(image, name, portMap string, binds []string, cmd []string, env []string) (cid string, port int, err error) { + ctx := context.Background() + + // Start the SMB container + containerConfig := &container.Config{ + Image: image, + Cmd: cmd, + Labels: map[string]string{ + "mysqltest": "", + }, + Env: env, + } + hostConfig := &container.HostConfig{ + Binds: binds, + } + var containerPort nat.Port + if portMap != "" { + containerPort = nat.Port(portMap) + containerConfig.ExposedPorts = nat.PortSet{ + containerPort: struct{}{}, + } + hostConfig.PortBindings = nat.PortMap{ + containerPort: []nat.PortBinding{{HostIP: "0.0.0.0"}}, + } + } + resp, err := d.cli.ContainerCreate(ctx, containerConfig, hostConfig, nil, nil, name) + if err != nil { + return + } + cid = resp.ID + err = d.cli.ContainerStart(ctx, cid, types.ContainerStartOptions{}) + if err != nil { + return + } + + // Retrieve the randomly assigned port + if portMap == "" { + return + } + inspect, err := d.cli.ContainerInspect(ctx, cid) + if err != nil { + return + } + portStr := inspect.NetworkSettings.Ports[containerPort][0].HostPort + port, err = strconv.Atoi(portStr) + + return +} + +func (d *dockerContext) makeSMB(smbImage string) error { + ctx := context.Background() + + // Build the smbImage + buildSMBImageOpts := types.ImageBuildOptions{ + Context: nil, + Tags: []string{smbImage}, + Remove: true, + } + + tar, err := archive.TarWithOptions("ctr/", &archive.TarOptions{}) + if err != nil { + return fmt.Errorf("failed to create tar archive: %w", err) + } + buildSMBImageOpts.Context = io.NopCloser(tar) + + resp, err := d.cli.ImageBuild(ctx, buildSMBImageOpts.Context, buildSMBImageOpts) + if err != nil { + return fmt.Errorf("failed to build smb image: %w", err) + } + io.Copy(os.Stdout, resp.Body) + resp.Body.Close() + + return nil +} + +func (d *dockerContext) createBackupFile(mysqlCID, mysqlUser, mysqlPass, outfile, compactOutfile string) error { + ctx := context.Background() + + // Create and populate the table + mysqlCreateCmd := []string{"mysql", "-hlocalhost", "--protocol=tcp", fmt.Sprintf("-u%s", mysqlUser), fmt.Sprintf("-p%s", mysqlPass), "-e", `use tester; create table t1 (id INT, name VARCHAR(20)); INSERT INTO 
t1 (id,name) VALUES (1, "John"), (2, "Jill"), (3, "Sam"), (4, "Sarah");`} + attachResp, exitCode, err := d.execInContainer(ctx, mysqlCID, mysqlCreateCmd) + if err != nil { + return fmt.Errorf("failed to attach to exec: %w", err) + } + defer attachResp.Close() + if exitCode != 0 { + return fmt.Errorf("failed to create table: %w", err) + } + var bufo, bufe bytes.Buffer + _, _ = stdcopy.StdCopy(&bufo, &bufe, attachResp.Reader) + + // Dump the database - do both compact and non-compact + mysqlDumpCompactCmd := []string{"mysqldump", "-hlocalhost", "--protocol=tcp", fmt.Sprintf("-u%s", mysqlUser), fmt.Sprintf("-p%s", mysqlPass), "--compact", "--databases", "tester"} + attachResp, exitCode, err = d.execInContainer(ctx, mysqlCID, mysqlDumpCompactCmd) + if err != nil { + return fmt.Errorf("failed to attach to exec: %w", err) + } + defer attachResp.Close() + if exitCode != 0 { + return fmt.Errorf("failed to dump database: %w", err) + } + + fCompact, err := os.Create(compactOutfile) + if err != nil { + return err + } + defer fCompact.Close() + + _, _ = stdcopy.StdCopy(fCompact, &bufe, attachResp.Reader) + + bufo.Reset() + bufe.Reset() + + mysqlDumpCmd := []string{"mysqldump", "-hlocalhost", "--protocol=tcp", fmt.Sprintf("-u%s", mysqlUser), fmt.Sprintf("-p%s", mysqlPass), "--databases", "tester"} + attachResp, exitCode, err = d.execInContainer(ctx, mysqlCID, mysqlDumpCmd) + if err != nil { + return fmt.Errorf("failed to attach to exec: %w", err) + } + defer attachResp.Close() + if exitCode != 0 { + return fmt.Errorf("failed to dump database: %w", err) + } + + f, err := os.Create(outfile) + if err != nil { + return err + } + defer f.Close() + + _, _ = stdcopy.StdCopy(f, &bufe, attachResp.Reader) + return err +} + +func (d *dockerContext) logContainers(cids ...string) error { + ctx := context.Background() + for _, cid := range cids { + logOptions := types.ContainerLogsOptions{ + ShowStdout: true, + ShowStderr: true, + } + logs, err := d.cli.ContainerLogs(ctx, cid, logOptions) + if err != nil { + return fmt.Errorf("failed to get logs for container %s: %w", cid, err) + } + defer logs.Close() + + if _, err := io.Copy(os.Stdout, logs); err != nil { + return fmt.Errorf("failed to stream logs for container %s: %w", cid, err) + } + } + return nil +} + +func (d *dockerContext) rmContainers(cids ...string) error { + ctx := context.Background() + for _, cid := range cids { + if err := d.cli.ContainerKill(ctx, cid, "SIGKILL"); err != nil { + return fmt.Errorf("failed to kill container %s: %w", cid, err) + } + + rmOpts := types.ContainerRemoveOptions{ + RemoveVolumes: true, + Force: true, + } + if err := d.cli.ContainerRemove(ctx, cid, rmOpts); err != nil { + return fmt.Errorf("failed to remove container %s: %w", cid, err) + } + } + return nil +} + +// we need to run through each each target and test the backup. 
+// before the first run, we: +// - start the sql database +// - populate it with a few inserts/creates +// - run a single clear backup +// for each stage, we: +// - clear the target +// - run the backup +// - check that the backup now is there in the right format +// - clear the target + +func runDumpTest(dc *dockerContext, compact bool, base string, targets []backupTarget, sequence int, smb, mysql containerPort, s3 string) error { + dbconn := database.Connection{ + User: mysqlUser, + Pass: mysqlPass, + Host: "localhost", + Port: mysql.port, + } + var targetVals []storage.Storage + // all targets should have the same sequence, with varying subsequence, so take any one + var id string + for _, target := range targets { + t := target.String() + id = target.ID() + t = target.WithPrefix(base) + localPath := target.LocalPath() + if err := os.MkdirAll(localPath, 0o755); err != nil { + return fmt.Errorf("failed to create local path %s: %w", localPath, err) + } + store, err := storage.ParseURL(t, credentials.Creds{AWSEndpoint: s3}) + if err != nil { + return fmt.Errorf("invalid target url: %v", err) + } + targetVals = append(targetVals, store) + + } + dumpOpts := core.DumpOptions{ + Targets: targetVals, + DBConn: dbconn, + Compressor: &compression.GzipCompressor{}, + Compact: compact, + PreBackupScripts: filepath.Join(base, "backups", id, "pre-backup"), + PostBackupScripts: filepath.Join(base, "backups", id, "post-backup"), + } + timerOpts := core.TimerOptions{ + Once: true, + } + return core.TimerDump(dumpOpts, timerOpts) +} + +func setup(dc *dockerContext, base, backupFile, compactBackupFile string) (mysql, smb containerPort, s3url string, s3backend gofakes3.Backend, err error) { + if err := dc.makeSMB(smbImage); err != nil { + return mysql, smb, s3url, s3backend, fmt.Errorf("failed to build smb image: %v", err) + } + + // start up the various containers + smbCID, smbPort, err := dc.startSMBContainer(smbImage, "smb", base) + if err != nil { + return mysql, smb, s3url, s3backend, fmt.Errorf("failed to start smb container: %v", err) + } + smb = containerPort{name: "smb", id: smbCID, port: smbPort} + + // start the s3 container + s3backend = s3mem.New() + // create the bucket we will use for tests + if err := s3backend.CreateBucket(bucketName); err != nil { + return mysql, smb, s3url, s3backend, fmt.Errorf("failed to create bucket: %v", err) + } + s3 := gofakes3.New(s3backend) + s3server := httptest.NewServer(s3.Server()) + s3url = s3server.URL + + // start the mysql container; configure it for lots of debug logging, in case we need it + mysqlConf := ` +[mysqld] +log_error =/var/log/mysql/mysql_error.log +general_log_file=/var/log/mysql/mysql.log +general_log =1 +slow_query_log =1 +slow_query_log_file=/var/log/mysql/mysql_slow.log +long_query_time =2 +log_queries_not_using_indexes = 1 +` + confFile := filepath.Join(base, "log.cnf") + if err := os.WriteFile(confFile, []byte(mysqlConf), 0644); err != nil { + return mysql, smb, s3url, s3backend, fmt.Errorf("failed to write mysql config file: %v", err) + } + logDir := filepath.Join(base, "mysql_logs") + if err := os.Mkdir(logDir, 0755); err != nil { + return mysql, smb, s3url, s3backend, fmt.Errorf("failed to create mysql log directory: %v", err) + } + mysqlCID, mysqlPort, err := dc.startContainer(mysqlImage, "mysql", "3306/tcp", []string{fmt.Sprintf("%s:/etc/mysql/conf.d/log.conf:ro", confFile), fmt.Sprintf("%s:/var/log/mysql", logDir)}, nil, []string{ + fmt.Sprintf("MYSQL_ROOT_PASSWORD=%s", mysqlRootPass), + "MYSQL_DATABASE=tester", + 
fmt.Sprintf("MYSQL_USER=%s", mysqlUser), + fmt.Sprintf("MYSQL_PASSWORD=%s", mysqlPass), + }) + if err != nil { + return + } + mysql = containerPort{name: "mysql", id: mysqlCID, port: mysqlPort} + + if err = dc.waitForDBConnectionAndGrantPrivileges(mysqlCID, mysqlRootUser, mysqlRootPass); err != nil { + return + } + + // create the backup file + log.Debugf("Creating backup file") + if err := dc.createBackupFile(mysql.id, mysqlUser, mysqlPass, backupFile, compactBackupFile); err != nil { + return mysql, smb, s3url, s3backend, fmt.Errorf("failed to create backup file: %v", err) + } + return +} + +func targetToTargets(target string, sequence int, smb containerPort, base string) ([]backupTarget, error) { + var ( + targets = strings.Fields(target) + allTargets []backupTarget + ) + id := fmt.Sprintf("%05d", rand.Intn(10000)) + for i, t := range targets { + subid := fmt.Sprintf("%02d", i) + // parse the URL, taking any smb protocol and replacing the host:port with our local host:port + u, err := url.Parse(t) + if err != nil { + return nil, err + } + + // localPath tracks the local path that is equivalent to where the backup + // target points. + var localPath string + relativePath := filepath.Join(id, subid, "data") + subPath := filepath.Join("/backups", relativePath) + localPath = filepath.Join(base, subPath) + + switch u.Scheme { + case "smb": + u.Host = fmt.Sprintf("localhost:%d", smb.port) + u.Path = filepath.Join(u.Path, subPath) + case "file", "": + // explicit or implicit file + u.Scheme = "file" + u.Path = subPath + case "s3": + // prepend the bucket name to the path + // because fakes3 uses path-style naming + u.Path = filepath.Join(u.Hostname(), subPath) + default: + } + // we ignore the path, instead sending the backup to a unique directory + // this WILL break when we do targets that are smb or the like. + // will need to find a way to fix this. 
+ finalTarget := u.String() + allTargets = append(allTargets, backupTarget{s: finalTarget, id: id, subid: subid, localPath: localPath}) + } + // Configure the container + if len(allTargets) == 0 { + return nil, errors.New("must provide at least one target") + } + return allTargets, nil +} + +type checkCommand func(t *testing.T, base string, validBackup []byte, s3backend gofakes3.Backend, targets []backupTarget) + +func runTest(t *testing.T, dc *dockerContext, compact bool, targets []string, base string, prePost bool, backupData []byte, mysql, smb containerPort, s3 string, s3backend gofakes3.Backend, checkCommand checkCommand) { + // run backups for each target + for i, target := range targets { + t.Run(target, func(t *testing.T) { + // should add t.Parallel() here for parallel execution, but later + log.Debugf("Running test for target '%s'", target) + allTargets, err := targetToTargets(target, i, smb, base) + if err != nil { + t.Fatalf("failed to parse target: %v", err) + } + log.Debugf("Populating data for target %s", target) + if err := populateVol(base, allTargets); err != nil { + t.Fatalf("failed to populate volume for target %s: %v", target, err) + } + if err := populatePrePost(base, allTargets); err != nil { + t.Fatalf("failed to populate pre-post for target %s: %v", target, err) + } + log.Debugf("Running backup for target %s", target) + if err := runDumpTest(dc, compact, base, allTargets, i, smb, mysql, s3); err != nil { + t.Fatalf("failed to run dump test: %v", err) + } + + checkCommand(t, base, backupData, s3backend, allTargets) + }) + } +} + +func checkDumpTest(t *testing.T, base string, expected []byte, s3backend gofakes3.Backend, targets []backupTarget) { + // all of it is in the volume we created, so check from there + var ( + backupDataReader io.Reader + ) + // we might have multiple targets + for i, target := range targets { + // check that the expected backups are in the right place + var ( + id = target.ID() + scheme = target.Scheme() + postBackupOutFile = fmt.Sprintf("%s/backups/%s/post-backup/post-backup.txt", base, id) + preBackupOutFile = fmt.Sprintf("%s/backups/%s/pre-backup/pre-backup.txt", base, id) + // useful for restore tests, which are disabled for now, so commented out + //postRestoreFile = fmt.Sprintf("%s/backups/%s/post-restore/post-restore.txt", base, sequence) + //preRestoreFile = fmt.Sprintf("%s/backups/%s/pre-restore/pre-restore.txt", base, sequence) + ) + // postBackup and preBackup are only once for a set of targets + if i == 0 { + msg := fmt.Sprintf("%s %s post-backup", id, target.String()) + if _, err := os.Stat(postBackupOutFile); err != nil { + t.Errorf("%s script didn't run, output file doesn't exist", msg) + } + os.RemoveAll(postBackupOutFile) + + msg = fmt.Sprintf("%s %s pre-backup", id, target.String()) + if _, err := os.Stat(preBackupOutFile); err != nil { + t.Errorf("%s script didn't run, output file doesn't exist", msg) + } + os.RemoveAll(preBackupOutFile) + } + p := target.Path() + if p == "" { + t.Fatalf("target %s has no path", target.String()) + return + } + + switch scheme { + case "s3": + // because we had to add the bucket at the beginning of the path, because fakes3 + // does path-style, remove it now + // the object is sensitive to not starting with '/' + // we do it in 2 steps, though, so that if it was not already starting with a `/`, + // we still will remove the bucketName + p = strings.TrimPrefix(p, "/") + p = strings.TrimPrefix(p, bucketName+"/") + objList, err := s3backend.ListBucket( + bucketName, + 
&gofakes3.Prefix{HasPrefix: true, Prefix: p}, + gofakes3.ListBucketPage{}, + ) + if err != nil { + t.Fatalf("failed to get backup objects from s3: %v", err) + return + } + for _, objInfo := range objList.Contents { + if strings.HasSuffix(objInfo.Key, ".tgz") { + obj, err := s3backend.GetObject(bucketName, objInfo.Key, nil) + if err != nil { + t.Fatalf("failed to get backup object %s from s3: %v", objInfo.Key, err) + return + } + backupDataReader = obj.Contents + break + } + } + default: + bdir := target.LocalPath() + + var backupFile string + entries, err := os.ReadDir(bdir) + if err != nil { + t.Fatalf("failed to read backup directory %s: %v", bdir, err) + return + } + for _, entry := range entries { + if strings.HasSuffix(entry.Name(), ".tgz") { + backupFile = entry.Name() + break + } + } + if backupFile == "" { + assert.NotEmpty(t, backupFile, "missing backup tgz file %s", id) + continue + } + backupFile = filepath.Join(bdir, backupFile) + backupDataReader, err = os.Open(backupFile) + if err != nil { + t.Fatalf("failed to read backup file %s: %v", backupFile, err) + return + } + } + + // extract the actual data, but filter out lines we do not care about + b, err := gunzipUntarScanFilter(backupDataReader) + assert.NoError(t, err, "failed to extract backup data for %s", id) + expectedFiltered := string(filterLines(bytes.NewReader(expected))) + + // this does not work because of information like the header that is unique + // to each format + assert.Equal(t, expectedFiltered, string(b), "%s tar contents do not match actual dump", id) + } + + return +} + +// gunzipUntarScanFilter is a helper function to extract the actual data from a backup +// It unzips, untars getting the first file, and then scans the file for lines we do not +// care about, returning the remaining content. +func gunzipUntarScanFilter(r io.Reader) (b []byte, err error) { + gr, err := gzip.NewReader(r) + if err != nil { + return nil, err + } + defer gr.Close() + tr := tar.NewReader(gr) + if _, err := tr.Next(); err != nil { + return nil, err + } + return filterLines(tr), nil +} + +// filterLines filters out lines that are allowed to differ +func filterLines(r io.Reader) (b []byte) { + scanner := bufio.NewScanner(r) + for scanner.Scan() { + var use = true + line := scanner.Text() + for _, filter := range dumpFilterRegex { + if filter.Match([]byte(line)) { + use = false + break + } + } + if !use { + continue + } + line += "\n" + b = append(b, line...) 
+ } + return b +} + +func populateVol(base string, targets []backupTarget) (err error) { + for _, target := range targets { + dataDir := target.LocalPath() + if err := os.MkdirAll(dataDir, 0777); err != nil { + return err + } + if err := os.WriteFile(filepath.Join(dataDir, "list"), []byte(fmt.Sprintf("target: %s\n", target)), 0666); err != nil { + return err + } + } + return +} + +func populatePrePost(base string, targets []backupTarget) (err error) { + // Create a test script for the post backup processing test + if len(targets) == 0 { + return fmt.Errorf("no targets specified") + } + id := targets[0].ID() + workdir := filepath.Join(base, "backups", id) + for _, dir := range []string{"pre-backup", "post-backup", "pre-restore", "post-restore"} { + if err := os.MkdirAll(filepath.Join(workdir, dir), 0777); err != nil { + return err + } + if err := os.WriteFile( + filepath.Join(workdir, dir, "test.sh"), + []byte(fmt.Sprintf("#!/bin/bash\ntouch %s.txt", filepath.Join(workdir, dir, dir))), + 0777); err != nil { + return err + } + // test.sh files need to be executable, but we already set them + // might need to do this later + // chmod -R 0777 /backups/${sequence} + // chmod 755 /backups/${sequence}/*/test.sh + } + + return nil +} + +func TestIntegration(t *testing.T) { + syscall.Umask(0) + t.Run("dump", func(t *testing.T) { + var ( + err error + smb, mysql containerPort + s3 string + s3backend gofakes3.Backend + ) + // temporary working directory + base, err := os.MkdirTemp("", "backup-test-") + if err != nil { + t.Fatalf("failed to create temp dir: %v", err) + } + // ensure that the container has full access to it + if err := os.Chmod(base, 0o777); err != nil { + t.Fatalf("failed to chmod temp dir: %v", err) + } + dc, err := getDockerContext() + if err != nil { + t.Fatalf("failed to get docker client: %v", err) + } + backupFile := filepath.Join(base, "backup.sql") + compactBackupFile := filepath.Join(base, "backup-compact.sql") + if mysql, smb, s3, s3backend, err = setup(dc, base, backupFile, compactBackupFile); err != nil { + t.Fatalf("failed to setup test: %v", err) + } + backupData, err := os.ReadFile(backupFile) + if err != nil { + t.Fatalf("failed to read backup file %s: %v", backupFile, err) + } + compactBackupData, err := os.ReadFile(compactBackupFile) + if err != nil { + t.Fatalf("failed to read compact backup file %s: %v", compactBackupFile, err) + } + defer func() { + // log the results before tearing down, if requested + if err := logContainers(dc, smb.id, mysql.id); err != nil { + log.Errorf("failed to get logs from service containers: %v", err) + } + + // tear everything down + if err := teardown(dc, smb.id, mysql.id); err != nil { + log.Errorf("failed to teardown test: %v", err) + } + }() + + // check just the contents of a compact backup + t.Run("full", func(t *testing.T) { + runTest(t, dc, false, []string{ + "/full-backups/", + }, base, false, backupData, mysql, smb, s3, s3backend, checkDumpTest) + }) + + // check just the contents of a backup without minimizing metadata (i.e. 
non-compact) + t.Run("compact", func(t *testing.T) { + runTest(t, dc, true, []string{ + "/compact-backups/", + }, base, false, compactBackupData, mysql, smb, s3, s3backend, checkDumpTest) + }) + + // test targets + t.Run("targets", func(t *testing.T) { + // set a default region + if err := os.Setenv("AWS_REGION", "us-east-1"); err != nil { + t.Fatalf("failed to set AWS_REGION: %v", err) + } + if err := os.Setenv("AWS_ACCESS_KEY_ID", "abcdefg"); err != nil { + t.Fatalf("failed to set AWS_ACCESS_KEY_ID: %v", err) + } + if err := os.Setenv("AWS_SECRET_ACCESS_KEY", "1234567"); err != nil { + t.Fatalf("failed to set AWS_SECRET_ACCESS_KEY: %v", err) + } + runTest(t, dc, false, []string{ + "/backups/", + "file:///backups/", + "smb://smb/noauth/", + "smb://user:pass@smb/auth", + "smb://CONF;user:pass@smb/auth", + fmt.Sprintf("s3://%s/", bucketName), + "file:///backups/ file:///backups/", + }, base, true, backupData, mysql, smb, s3, s3backend, checkDumpTest) + }) + + }) +} diff --git a/test/Dockerfile_smb b/test/ctr/Dockerfile similarity index 75% rename from test/Dockerfile_smb rename to test/ctr/Dockerfile index c78dff1f..b5933bd9 100644 --- a/test/Dockerfile_smb +++ b/test/ctr/Dockerfile @@ -1,5 +1,4 @@ FROM alpine:3.17 -LABEL org.opencontainers.image.authors="https://github.com/deitch" # smb port EXPOSE 445 @@ -15,5 +14,8 @@ COPY *.tdb /var/lib/samba/private/ RUN adduser user -D -H RUN echo user:pass | chpasswd +# ensure that the directory where we will mount it exists, so that nobody user can write there +RUN mkdir -p /share/backups && chmod 0777 /share/backups + # run samba in the foreground CMD /usr/sbin/smbd -F --debug-stdout -d 4 --no-process-group diff --git a/test/ctr/cron_test.sh b/test/ctr/cron_test.sh deleted file mode 100755 index 0cf9327f..00000000 --- a/test/ctr/cron_test.sh +++ /dev/null @@ -1,108 +0,0 @@ -#!/bin/bash -set -e - -DEBUG=${DEBUG:-0} -[[ -n "$DEBUG" && "$DEBUG" == "verbose" ]] && DEBUG=1 -[[ -n "$DEBUG" && "$DEBUG" == "debug" ]] && DEBUG=2 - -[[ "$DEBUG" == "2" ]] && set -x - -# where is our functions file? By default, in container so at /functions.sh -# but can override to run independently -FUNCTIONS=${FUNCTIONS:-/functions.sh} - -. 
${FUNCTIONS} - -# list of cron expressions, inputs and results -declare -a cronnum croninput cronresult cronline nowtime waittime - - set -f - tests=( - "* 59 44 44" # 44 is the number in the range * that is >= 44 - "4 59 4 4" # 4 is the number that is greater than or equal to "4" - "5 59 4 5" # 5 is the next number that matches "5", and is >= 4 - "3-7 59 4 4" # 4 is the number that fits within 3-7 - "3-7 59 9 3" # no number in the range 3-7 ever is >= 9, so should cycle back to 3 - "*/2 59 4 4" # 4 is divisible by 2 - "*/5 59 4 5" # 5 is the next number in the range * that is divisible by 5, and is >= 4 - "0-20/5 59 4 5" # 5 is the next number in the range 0-20 that is divisible by 5, and is >= 4 - "15-30/5 59 4 15" # 15 is the next number in the range 15-30 that is in increments of 5, and is >= 4 - "18-30/5 59 4 18" # 18 is the next number >=4 in the range 18,23,28 - "15-30/5 59 20 20" # 20 is the next number in the range 15-30 that is in increments of 5, and is >= 20 - "15-30/5 59 35 15" # no number in the range 15-30/5 will ever be >=35, so should cycle back to 15 - "*/10 12 11 0" # the next match after 11 would be 20, but that would be greater than the maximum of 12, so should cycle back to 0 - "1 11 1 1" - "1,2,3 6 2 2" - "1-3,5 6 2 2" - "5-7,6,1 11 4 5" - "* 11 8 8" - "5,6,2-3,* 30 7 7" - ) - -pass=0 -fail=0 - -for tt in "${tests[@]}"; do - parts=(${tt}) - expected="${parts[3]}" - result=$(next_cron_expression ${parts[0]} ${parts[1]} ${parts[2]}) - if [ "$result" = "$expected" ]; then - ((pass+=1)) - else - echo "Failed next_cron_expression \"$ex\" \"$in\": received \"$out\" instead of \"$re\"" >&2 - ((fail+=1)) - fi -done - -cronline=( - "1 * * * *" - "1 * * * *" - "* 1 * * *" - "1 * * * *" - "0 0 * * *" - "0 0 1 * *" - #"10 2 10 * *" -) - -nowtime=( - "2018-10-10T10:01:00Z" - "2018-10-10T10:00:00Z" - "2018-10-10T10:00:00Z" - "2018-10-10T10:01:10Z" # this line tests that we use the current minute, and not wait for "-10" - "2021-11-30T10:00:00Z" - "2020-12-30T10:00:00Z" # this line tests that we can handle rolling month correctly -) -waittime=( - "0" - "60" - "54000" - "0" - "50400" - "136800" -) - -for ((i=0; i< ${#cronline[@]}; i++)); do - ex="${cronline[$i]}" - in=$(getdateas "${nowtime[$i]}" "+%s") - re=${waittime[$i]} - out=$(wait_for_cron "$ex" "$in" 0) - if [ "$out" = "$re" ]; then - ((pass+=1)) - else - echo "Failed wait_for_cron \"$ex\" \"$in\": received \"$out\" instead of \"$re\"" >&2 - ((fail+=1)) - fi -done - - - -# report results -echo "Passed: $pass" -echo "Failed: $fail" - -if [[ "$fail" != "0" ]]; then - exit 1 -else - exit 0 -fi - diff --git a/test/ctr/entrypoint_test.sh b/test/ctr/entrypoint_test.sh deleted file mode 100755 index 2afd95c7..00000000 --- a/test/ctr/entrypoint_test.sh +++ /dev/null @@ -1,227 +0,0 @@ -#!/bin/bash -set -e - -DEBUG=${DEBUG:-0} -[[ -n "$DEBUG" && "$DEBUG" == "verbose" ]] && DEBUG=1 -[[ -n "$DEBUG" && "$DEBUG" == "debug" ]] && DEBUG=2 - -[[ "$DEBUG" == "2" ]] && set -x - -MYSQLDUMP=/backups/valid.tgz - -function populate_vol() { - local t=$1 - local sequence=$2 - subseq=0 - # we might have multiple targets - for target in $t ; do - seqno="${sequence}-${subseq}" - # where will we store - # create the backups directory - # clear the target - # replace SEQ if needed - t2=${t/SEQ/${seqno}} - mkdir -p /backups/${seqno}/data - chmod -R 0777 /backups/${seqno} - echo "target: ${t2}" >> /backups/${seqno}/list - - # are we working with nopath? 
- if [[ "$t2" =~ nopath ]]; then - rm -f /backups/nopath - ln -s ${seqno}/data /backups/nopath - fi - - ((subseq++)) || true - done -} - -function populate_pre_post() { - local sequence=$1 - # Create a test script for the post backup processing test - mkdir -p /backups/${sequence}/{pre-backup,post-backup,pre-restore,post-restore} - echo touch /backups/${sequence}/post-backup/post-backup.txt > /backups/${sequence}/post-backup/test.sh - echo touch /backups/${sequence}/post-restore/post-restore.txt > /backups/${sequence}/post-restore/test.sh - echo touch /backups/${sequence}/pre-backup/pre-backup.txt > /backups/${sequence}/pre-backup/test.sh - echo touch /backups/${sequence}/pre-restore/pre-restore.txt > /backups/${sequence}/pre-restore/test.sh - chmod -R 0777 /backups/${sequence} - chmod 755 /backups/${sequence}/*/test.sh -} - -# -function checktest() { - local t=$1 - local sequence=$2 - - - # to make it easier to hunt through output logs - echo >&2 - echo "*** CHECKING SEQUENCE ${sequence} ***" >&2 - - # all of it is in the volume we created, so check from there - POST_BACKUP_OUT_FILE="/backups/${sequence}/post-backup/post-backup.txt" - PRE_BACKUP_OUT_FILE="/backups/${sequence}/pre-backup/pre-backup.txt" - POST_RESTORE_OUT_FILE="/backups/${sequence}/post-restore/post-restore.txt" - PRE_RESTORE_OUT_FILE="/backups/${sequence}/pre-restore/pre-restore.txt" - if [[ -e "${POST_BACKUP_OUT_FILE}" ]]; then - pass+=("$sequence post-backup") - rm -fr ${POST_BACKUP_OUT_FILE} - else - fail+=("$sequence $t pre-backup script didn't run, output file doesn't exist") - fi - if [[ -e "${PRE_BACKUP_OUT_FILE}" ]]; then - pass+=("$sequence pre-backup") - rm -fr ${PRE_BACKUP_OUT_FILE} - else - fail+=("$sequence $t post-backup script didn't run, output file doesn't exist") - fi - - # we might have multiple targets - local subseq=0 - for target in $t ; do - seqno="${sequence}-${subseq}" - # where do we expect backups? - bdir=/backups/${seqno}/data # change our target - if [[ "$DEBUG" != "0" ]]; then - ls -la $bdir >&2 - fi - - - # check that the expected backups are in the right place - # need temporary places to hold files - TMP1=/backups/check1 - TMP2=/backups/check2 - - BACKUP_FILE=$(ls -d1 $bdir/db_backup_*.tgz 2>/dev/null) - - # check for the directory - if [[ ! -d "$bdir" ]]; then - fail+=("$seqno $t missing $bdir") - elif [[ -z "$BACKUP_FILE" ]]; then - fail+=("$seqno $t missing missing backup zip file") - else - # what if it was s3? 
- [[ -f "${BACKUP_FILE}/.fakes3_metadataFFF/content" ]] && BACKUP_FILE="${BACKUP_FILE}/.fakes3_metadataFFF/content" - - # extract the actual data, but filter out lines we do not care about - # " | cat " at the end so it returns true because we run "set -e" - cat ${BACKUP_FILE} | tar -xOzf - | sed -e 's/^\/\*![0-9]\{5\}.*\/;$//g' | sed 's/^.*SET character_set_client.*$//g' | cat > $TMP1 - cat ${MYSQLDUMP} | tar -xOzf - | sed -e 's/^\/\*![0-9]\{5\}.*\/;$//g' | sed 's/^.*SET character_set_client.*$//g' |cat > $TMP2 - - # check the file contents against the source directory - # " | cat " at the end so it returns true because we run "set -e" - diffout=$(diff $TMP1 $TMP2 | cat) - if [[ -z "$diffout" ]]; then - pass+=("$sequence dump-contents") - else - fail+=("$seqno $t tar contents do not match actual dump") - fi - - fi - if [ -n "$TESTRESTORE" ]; then - if [[ -e "${POST_RESTORE_OUT_FILE}" ]]; then - pass+=("$sequence post-restore") - rm -fr ${POST_RESTORE_OUT_FILE} - else - fail+=("$seqno $t post-restore script didn't run, output file doesn't exist") - fi - if [[ -e "${PRE_RESTORE_OUT_FILE}" ]]; then - pass+=("$sequence pre-restore") - rm -fr ${PRE_RESTORE_OUT_FILE} - else - fail+=("$seqno $t pre-restore script didn't run, output file doesn't exist") - fi - fi - ((subseq++)) || true - done -} - -function check_source_target_test() { - local t=$1 - local sequence=$2 - local cid=$3 - local SOURCE_FILE=$4 - local TARGET_FILE=$5 - - # to make it easier to hunt through output logs - echo >&2 - echo "*** CHECKING SEQUENCE ${sequence} ***" >&2 - - # we might have multiple targets - local subseq=0 - for target in $t ; do - seqno="${sequence}-${subseq}" - # where do we expect backups? - bdir=/backups/${seqno}/data # change our target - if [[ "$DEBUG" != "0" ]]; then - ls -la $bdir - fi - - # check that the expected backups are in the right place - BACKUP_FILE=$(ls -d1 $bdir/${SOURCE_FILE} 2>/dev/null) - - [[ "$DEBUG" != "0" ]] && echo "Checking target backup file exists for target ${target}" - - # check for the directory - if [[ ! -d "$bdir" ]]; then - fail+=("$seqno: $target missing $bdir") - elif [[ -z "$BACKUP_FILE" ]]; then - fail+=("$seqno: $target missing zip file") - else - pass+=($seqno) - fi - - if [[ ! -z ${TARGET_FILE} ]]; then - [[ "$DEBUG" != "0" ]] && echo "Checking target backup filename matches expected ${target}" - local BACKUP_FILE_BASENAME = ${BACKUP_FILE##*/} - [[ ${BACKUP_FILE_BASENAME} == ${TARGET_FILE} ]] && pass+=($seqno) || fail+=("${seqno}: ${target} uploaded target file name does not match expected. 
- fi
- done
-}
-
-function print_pass_fail() {
- for ((i=0; i< ${#pass[@]}; i++)); do
- echo "PASS: ${pass[$i]}"
- done
- for ((i=0; i< ${#fail[@]}; i++)); do
- echo "FAIL: ${fail[$i]}"
- done
-}
-
-# we do whichever commands were requested
-cmd="$1"
-target="$2"
-seq="$3"
-source_file="$4"
-target_file="$5"
-
-declare -a fail
-declare -a pass
-
-case $cmd in
-prepare_pre_post)
- populate_pre_post $seq
- ;;
-populate)
- populate_vol "$target" $seq
- ;;
-check)
- checktest "$target" $seq
- print_pass_fail
- ;;
-check_source_target)
- check_source_target_test "$target" $seq $source_file $target_file
- print_pass_fail
- ;;
-save_dump)
- cat > $MYSQLDUMP
- ;;
-cron)
- /cron_test.sh
- ;;
-*)
- echo "unrecognized command: ${cmd}"
- exit 2
- ;;
-esac
-
-
diff --git a/test/ctr/smb.conf b/test/ctr/smb.conf
index c6ccc9fc..572d9e24 100644
--- a/test/ctr/smb.conf
+++ b/test/ctr/smb.conf
@@ -1,10 +1,6 @@
 [global]
- # replace "toltec" with your system's hostname
- netbios name = conf
- # replace "METRAN" with the name of your workgroup
- workgroup = CONF
 security = user
diff --git a/test/package_noteardown_test.go b/test/package_noteardown_test.go
new file mode 100644
index 00000000..02e7d89e
--- /dev/null
+++ b/test/package_noteardown_test.go
@@ -0,0 +1,7 @@
+//go:build integration && keepcontainers
+
+package test
+
+func teardown(dc *dockerContext, cids ...string) error {
+	return nil
+}
diff --git a/test/test_cron.sh b/test/test_cron.sh
deleted file mode 100755
index 27ac66b1..00000000
--- a/test/test_cron.sh
+++ /dev/null
@@ -1,18 +0,0 @@
-#!/bin/bash
-set -e
-
-DEBUG=${DEBUG:-0}
-[[ -n "$DEBUG" && "$DEBUG" == "verbose" ]] && DEBUG=1
-[[ -n "$DEBUG" && "$DEBUG" == "debug" ]] && DEBUG=2
-
-[[ "$DEBUG" == "2" ]] && set -x
-
-# cron unit tests
-FUNCTIONS=../functions.sh ./ctr/cron_test.sh
-
-# run tests using container
-source ./_functions.sh
-
-make_test_images
-docker run --rm -e DEBUG=${DEBUG} ${BACKUP_TESTER_IMAGE} cron
-
diff --git a/test/test_dump.sh b/test/test_dump.sh
deleted file mode 100755
index a97a5a89..00000000
--- a/test/test_dump.sh
+++ /dev/null
@@ -1,122 +0,0 @@
-#!/bin/bash
-set -e
-
-source ./_functions.sh
-
-# list of sources and targets
-declare -a targets
-
-# fill in with a var
-targets=(
-"/backups/SEQ/data"
-"file:///backups/SEQ/data"
-"smb://smb/noauth/SEQ/data"
-"smb://smb/nopath"
-"smb://user:pass@smb/auth/SEQ/data"
-"smb://CONF;user:pass@smb/auth/SEQ/data"
-"s3://mybucket/SEQ/data"
-"file:///backups/SEQ/data file:///backups/SEQ/data"
-)
-
-# we need to run through each target and test the backup.
-# before the first run, we:
-# - start the sql database
-# - populate it with a few inserts/creates
-# - run a single clear backup
-# for each stage, we:
-# - clear the target
-# - run the backup
-# - check that the backup now is there in the right format
-# - clear the target
-
-cids=""
-# make the parent for the backups
-
-makevolume
-
-# build the core images
-make_test_images
-
-makesmb
-
-makenetwork
-
-start_service_containers
-
-create_backup_file
-
-
-#
-# keep track of the sequence
-seq=0
-# do the file tests
-[[ "$DEBUG" != "0" ]] && echo "Doing tests"
-# create each target
-[[ "$DEBUG" != "0" ]] && echo "Populating volume for each target"
-for ((i=0; i< ${#targets[@]}; i++)); do
- t=${targets[$i]}
- docker run --label mysqltest --name mysqlbackup-data-populate --rm -v ${BACKUP_VOL}:/backups -v ${CERTS_VOL}:/certs -e MYSQLDUMP_OPTS="${MYSQLDUMP_OPTS}" -e DEBUG=${DEBUG} ${BACKUP_TESTER_IMAGE} populate "$t" $seq
- docker run --label mysqltest --name mysqlbackup-data-populate --rm -v ${BACKUP_VOL}:/backups -v ${CERTS_VOL}:/certs -e MYSQLDUMP_OPTS="${MYSQLDUMP_OPTS}" -e DEBUG=${DEBUG} ${BACKUP_TESTER_IMAGE} prepare_pre_post "$t" $seq
- # increment our counter
- ((seq++)) || true
-done
-total=$seq
-
-# keep track of the sequence
-seq=0
-# create each target
-[[ "$DEBUG" != "0" ]] && echo "Running backups for each target"
-for ((i=0; i< ${#targets[@]}; i++)); do
- t=${targets[$i]}
- cids1=$(run_dump_test "$t" $seq)
- cids="$cids $cids1"
- # increment our counter
- ((seq++)) || true
-done
-
-# now wait for everything
-sleepwait 10
-
-rm_service_containers $smb_cid $mysql_cid $s3_cid
-rm_containers $cids
-rm_network
-
-# see the results and exit accordingly
-[[ "$DEBUG" != "0" ]] && echo "Checking results"
-declare -a fail
-declare -a pass
-
-seq=0
-for ((i=0; i< ${#targets[@]}; i++)); do
- t=${targets[$i]}
- results=$(docker run --label mysqltest --name mysqlbackup-data-check --rm -v ${BACKUP_VOL}:/backups -v ${CERTS_VOL}:/certs -e MYSQLDUMP_OPTS="${MYSQLDUMP_OPTS}" -e DEBUG=${DEBUG} ${BACKUP_TESTER_IMAGE} check "$t" $seq)
- # save the passes and fails
- # | cat - so that it doesn't return an error on no-match
- passes=$(echo "$results" | grep '^PASS:' | cat)
- fails=$(echo "$results" | grep '^FAIL:' | cat)
- echo "passes: '$passes'"
- echo "fails: '$fails'"
- while read -r line; do
- pass+=("$line")
- done < <(echo "$passes")
- while read -r line; do
- [ -n "$line" ] && fail+=("$line")
- done < <(echo "$fails")
- # increment our counter
- ((seq++)) || true
-done
-
-rm_volume
-
-# report results
-echo "Passed: ${#pass[@]}"
-echo "Failed: ${#fail[@]}"
-
-if [[ "${#fail[@]}" != "0" ]]; then
- for ((i=0; i< ${#fail[@]}; i++)); do
- echo "${fail[$i]}"
- done
- exit 1
-else
- exit 0
-fi
diff --git a/test/test_source_target.sh b/test/test_source_target.sh
deleted file mode 100755
index 5662c632..00000000
--- a/test/test_source_target.sh
+++ /dev/null
@@ -1,125 +0,0 @@
-#!/bin/bash
-set -e
-
-source ./_functions.sh
-
-BACKUP_DIRECTORY_BASE=/tmp/backups.$$
-
-# list of sources and targets
-declare -a targets
-
-# fill in with a var
-targets=(
-"file:///backups/SEQ/data"
-"smb://user:pass@smb/auth/SEQ/data"
-"s3://mybucket/SEQ/data"
-)
-
-# we need to run through each target and test the backup.
-# before the first run, we: -# - start the sql database -# - populate it with a few inserts/creates -# - run a single clear backup -# for each stage, we: -# - clear the target -# - run the backup -# - check that the backup now is there in the right format -# - clear the target - -[[ "$DEBUG" != "0" ]] && echo "Resetting backups directory" - -/bin/rm -rf ${BACKUP_DIRECTORY_BASE} -mkdir -p ${BACKUP_DIRECTORY_BASE} -chmod -R 0777 ${BACKUP_DIRECTORY_BASE} - -cids="" -# make the parent for the backups - -makevolume - -# build the core images -make_test_images - -makesmb - -makenetwork - -start_service_containers - -create_backup_file - -[[ "$DEBUG" != "0" ]] && echo "Doing tests" - -# keep track of the sequence -seq=0 - -# create each target -[[ "$DEBUG" != "0" ]] && echo "Populating volume for each target" -for ((i=0; i< ${#targets[@]}; i++)); do - t=${targets[$i]} - docker run --label mysqltest --name mysqlbackup-data-populate --rm -v ${BACKUP_VOL}:/backups -v ${CERTS_VOL}:/certs -e DEBUG=${DEBUG} ${BACKUP_TESTER_IMAGE} populate "$t" $seq - # increment our counter - ((seq++)) || true -done -total=$seq - -# do the file tests -# keep track of the sequence -seq=0 -[[ "$DEBUG" != "0" ]] && echo "Doing tests" -# create each target -[[ "$DEBUG" != "0" ]] && echo "Running backups for each target" -for ((i=0; i< ${#targets[@]}; i++)); do - t=${targets[$i]} - cids1=$(run_dump_test "$t" $seq) - cids="$cids $cids1" - # increment our counter - ((seq++)) || true -done -total=$seq - -# now wait for everything -sleepwait 10 - -rm_service_containers $smb_cid $mysql_cid $s3_cid -rm_containers $cids -rm_network - -# now check each result -[[ "$DEBUG" != "0" ]] && echo "Checking results" -declare -a fail -declare -a pass -seq=0 -for ((i=0; i< ${#targets[@]}; i++)); do - t=${targets[$i]} - results=$(docker run --label mysqltest --name mysqlbackup-data-check --rm -v ${BACKUP_VOL}:/backups -v ${CERTS_VOL}:/certs -e DEBUG=${DEBUG} ${BACKUP_TESTER_IMAGE} check_source_target "$t" $seq $(get_default_source)) - # save the passes and fails - # | cat - so that it doesn't return an error on no-match - passes=$(echo "$results" | grep '^PASS:' | cat) - fails=$(echo "$results" | grep '^FAIL:' | cat) - echo "passes: '$passes'" - echo "fails: '$fails'" - while read -r line; do - pass+=("$line") - done < <(echo "$passes") - while read -r line; do - [ -n "$line" ] && fail+=("$line") - done < <(echo "$fails") - # increment our counter - ((seq++)) || true -done - -rm_volume - -# report results -echo "Passed: ${#pass[@]}" -echo "Failed: ${#fail[@]}" - -if [[ "${#fail[@]}" != "0" ]]; then - for ((i=0; i< ${#fail[@]}; i++)); do - echo "${fail[$i]}" - done - exit 1 -else - exit 0 -fi
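The deleted scripts above verified a dump by extracting the `db_backup_*.tgz` archive, blanking the `/*!NNNNN ... */;` conditional-comment lines and any `SET character_set_client` lines, and diffing the result against a reference `mysqldump`. Below is a minimal Go sketch of that same extract-and-normalize step, as it might look inside the new `go test --tags=integration` suite; the helper names (`extractTgz`, `normalizeDump`) are illustrative only and are not part of this patch.

```go
package test

import (
	"archive/tar"
	"bytes"
	"compress/gzip"
	"io"
	"os"
	"regexp"
	"strings"
)

// versionComment matches the /*!NNNNN ... */; lines that the old sed filters
// blanked out before diffing dumps.
var versionComment = regexp.MustCompile(`^/\*![0-9]{5}.*/;$`)

// normalizeDump blanks the lines the old bash checks ignored: version
// comments and any line mentioning SET character_set_client.
func normalizeDump(s string) string {
	lines := strings.Split(s, "\n")
	for i, line := range lines {
		if versionComment.MatchString(line) || strings.Contains(line, "SET character_set_client") {
			lines[i] = ""
		}
	}
	return strings.Join(lines, "\n")
}

// extractTgz concatenates the contents of every entry in a gzipped tar
// archive, mirroring `tar -xOzf -` in the deleted scripts.
func extractTgz(path string) (string, error) {
	f, err := os.Open(path)
	if err != nil {
		return "", err
	}
	defer f.Close()

	gz, err := gzip.NewReader(f)
	if err != nil {
		return "", err
	}
	defer gz.Close()

	tr := tar.NewReader(gz)
	var buf bytes.Buffer
	for {
		if _, err := tr.Next(); err == io.EOF {
			break
		} else if err != nil {
			return "", err
		}
		if _, err := io.Copy(&buf, tr); err != nil {
			return "", err
		}
	}
	return buf.String(), nil
}
```

A test would then compare `normalizeDump` of the extracted archive against `normalizeDump` of the reference dump and fail on any difference, which is exactly the comparison the `diff $TMP1 $TMP2` step in the removed `checktest` performed.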