forked from databacker/mysql-backup
Merge pull request databacker#246 from databacker/no-override-config-unless-set
1 parent 85e24ae · commit 6de0ddf
Showing 2 changed files with 275 additions and 29 deletions.
@@ -1,3 +1,247 @@
#!/bin/bash

# (replaces the previous one-line entrypoint body, which was just: /mysql-backup $@)
. /functions.sh

if [[ -n "$DB_DUMP_DEBUG" ]]; then
  set -x
fi

# get all variables from environment variables or files (e.g. VARIABLE_NAME_FILE)
# (setting defaults happens here, too)
file_env "DB_SERVER"
file_env "DB_PORT"
file_env "DB_USER"
file_env "DB_PASS"
file_env "DB_NAMES"
file_env "DB_NAMES_EXCLUDE"
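# Example: instead of putting a secret in the environment directly, the _FILE
# convention mentioned above can be used, e.g. (hypothetical path)
#   DB_PASS_FILE=/run/secrets/db_pass
# file_env then reads that file's contents into DB_PASS.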

file_env "DB_DUMP_FREQ" "1440"
file_env "DB_DUMP_BEGIN" "+0"
file_env "DB_DUMP_DEBUG"
file_env "DB_DUMP_TARGET" "/backup"
file_env "DB_DUMP_BY_SCHEMA"
file_env "DB_DUMP_KEEP_PERMISSIONS" "true"
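# The second argument to file_env is the default. DB_DUMP_FREQ is in minutes
# (1440 = one backup per day; it is converted to seconds as freq_time below),
# and the DB_DUMP_BEGIN default of "+0" means start immediately.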

file_env "DB_RESTORE_TARGET"

file_env "AWS_ENDPOINT_URL"
file_env "AWS_ENDPOINT_OPT"
file_env "AWS_CLI_OPTS"
file_env "AWS_CLI_S3_CP_OPTS"
file_env "AWS_ACCESS_KEY_ID"
file_env "AWS_SECRET_ACCESS_KEY"
file_env "AWS_DEFAULT_REGION"

file_env "SMB_USER"
file_env "SMB_PASS"

file_env "TMP_PATH" "/tmp"

file_env "COMPRESSION" "gzip"

# check again, now that file_env may have populated DB_DUMP_DEBUG from a file
if [[ -n "$DB_DUMP_DEBUG" ]]; then
  set -x
fi

# ensure it is defined
MYSQLDUMP_OPTS=${MYSQLDUMP_OPTS:-}

# login credentials
if [ -n "${DB_USER}" ]; then
  DBUSER="-u${DB_USER}"
else
  DBUSER=
fi
if [ -n "${DB_PASS}" ]; then
  DBPASS="-p${DB_PASS}"
else
  DBPASS=
fi
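# The values are packed as single tokens because the mysql client expects the
# value glued to the flag, e.g. (hypothetical credentials):
#   mysql -h db.example.com -P 3306 -ubackupuser -ps3cret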

# database server
if [ -z "${DB_SERVER}" ]; then
  echo "DB_SERVER variable is required. Exiting."
  exit 1
fi
# database port
if [ -z "${DB_PORT}" ]; then
  echo "DB_PORT not provided, defaulting to 3306"
  DB_PORT=3306
fi

#
# set compress and decompress commands
COMPRESS=
UNCOMPRESS=
case $COMPRESSION in
  gzip)
    COMPRESS="gzip"
    UNCOMPRESS="gunzip"
    EXTENSION="tgz"
    ;;
  bzip2)
    COMPRESS="bzip2"
    UNCOMPRESS="bzip2 -d"
    EXTENSION="tbz2"
    ;;
  *)
    echo "Unknown compression requested: $COMPRESSION" >&2
    exit 1
    ;;
esac
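# A sketch of how another algorithm could slot into the case above
# (assumption: the tool is present in the image; this branch is NOT part of
# the actual script):
#   xz)
#     COMPRESS="xz"
#     UNCOMPRESS="xz -d"
#     EXTENSION="txz"
#     ;;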

# temporary dump dir
TMPDIR="${TMP_PATH}/backups"
TMPRESTORE="${TMP_PATH}/restorefile"

# this is global, so has to be set outside
declare -A uri
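# uri_parser (presumably defined in /functions.sh) fills this array; judging
# from the keys read below, a target like smb://user:pass@host/share/path is
# expected to yield uri[schema], uri[host], uri[share], uri[sharepath],
# uri[user], uri[password] and, for DOMAIN;user forms, uri[userdomain].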

if [[ -n "$DB_RESTORE_TARGET" ]]; then
  # Execute additional scripts for pre-restore processing. For example,
  # uncompress a tarball that contains the tarballs for the sql dump and a
  # wordpress installation.
  if [ -d /scripts.d/pre-restore/ ]; then
    for i in $(ls /scripts.d/pre-restore/*.sh); do
      if [ -x $i ]; then
        DB_RESTORE_TARGET=${DB_RESTORE_TARGET} DB_DUMP_DEBUG=${DB_DUMP_DEBUG} $i
      fi
    done
  fi
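  # Example of a (hypothetical) pre-restore hook, /scripts.d/pre-restore/00-unpack.sh:
  #   #!/bin/bash
  #   tar -C "${TMP_PATH}" -xvf /backup/site-and-db.tar
  # Any executable *.sh in the directory runs here, with DB_RESTORE_TARGET and
  # DB_DUMP_DEBUG passed through in its environment.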
  uri_parser ${DB_RESTORE_TARGET}
  if [[ "${uri[schema]}" == "file" ]]; then
    cp $DB_RESTORE_TARGET $TMPRESTORE 2>/dev/null
  elif [[ "${uri[schema]}" == "s3" ]]; then
    [[ -n "$AWS_ENDPOINT_URL" ]] && AWS_ENDPOINT_OPT="--endpoint-url $AWS_ENDPOINT_URL"
    aws ${AWS_CLI_OPTS} ${AWS_ENDPOINT_OPT} s3 cp ${AWS_CLI_S3_CP_OPTS} "${DB_RESTORE_TARGET}" $TMPRESTORE
  elif [[ "${uri[schema]}" == "smb" ]]; then
    if [[ -n "$SMB_USER" ]]; then
      UPASSARG="-U"
      UPASS="${SMB_USER}%${SMB_PASS}"
    elif [[ -n "${uri[user]}" ]]; then
      UPASSARG="-U"
      UPASS="${uri[user]}%${uri[password]}"
    else
      UPASSARG=
      UPASS=
    fi
    if [[ -n "${uri[userdomain]}" ]]; then
      UDOM="-W ${uri[userdomain]}"
    else
      UDOM=
    fi
    smbclient -N "//${uri[host]}/${uri[share]}" ${UPASSARG} "${UPASS}" ${UDOM} -c "get ${uri[sharepath]} ${TMPRESTORE}"
  fi
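  # Restore targets handled above, one per scheme (names hypothetical; plain
  # paths are assumed to parse as the file schema):
  #   DB_RESTORE_TARGET=/backup/db_backup.tgz                      (file)
  #   DB_RESTORE_TARGET=s3://my-bucket/db_backup.tgz               (s3)
  #   DB_RESTORE_TARGET=smb://user:pass@host/share/db_backup.tgz   (smb)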
  # did we get a file?
  if [[ -f "$TMPRESTORE" ]]; then
    if [ "$SINGLE_DATABASE" = "true" ]; then
      DBDATABASE="-D$DB_NAMES"
    else
      DBDATABASE=
    fi
    workdir="${TMP_PATH}/restore.$$"
    rm -rf $workdir
    mkdir -p $workdir
    $UNCOMPRESS < $TMPRESTORE | tar -C $workdir -xvf -
    RESTORE_OPTS=${RESTORE_OPTS:-}
    # If there are multiple schemas in the archive (e.g. DB_DUMP_BY_SCHEMA was used) and DB_NAMES is set,
    # restore only the required databases
    if [[ $(ls -1q $workdir/* | wc -l) -gt 1 ]] && [[ -n "$DB_NAMES" ]]; then
      for onedb in $DB_NAMES; do
        echo "Restoring $onedb from " $workdir/$onedb*
        # /!\ If one schema's name is a prefix of another's, the glob also matches
        # the longer name, so that schema gets restored again on this pass
        cat $workdir/$onedb* | mysql $RESTORE_OPTS -h $DB_SERVER -P $DB_PORT $DBUSER $DBPASS
      done
    else
      cat $workdir/* | mysql $RESTORE_OPTS -h $DB_SERVER -P $DB_PORT $DBUSER $DBPASS $DBDATABASE
    fi
    rm -rf $workdir
    /bin/rm -f $TMPRESTORE
  else
    echo "Could not find restore file $DB_RESTORE_TARGET"
    exit 1
  fi
  # Execute additional scripts for post-restore processing. For example,
  # uncompress a tarball that contains the files of a wordpress installation.
  if [ -d /scripts.d/post-restore/ ]; then
    for i in $(ls /scripts.d/post-restore/*.sh); do
      if [ -x $i ]; then
        DB_RESTORE_TARGET=${DB_RESTORE_TARGET} DB_DUMP_DEBUG=${DB_DUMP_DEBUG} $i
      fi
    done
  fi
else
  # wait for the next time to start a backup
  # for debugging
  echo Starting at $(date)
  last_run=0
  current_time=$(date +"%s")
  freq_time=$(($DB_DUMP_FREQ*60))
  # get the begin time on our date
  # REMEMBER: we are using the basic date package in alpine
  # could be a delay in minutes or an absolute time of day
  if [ -n "$DB_DUMP_CRON" ]; then
    # calculate how long until the next cron instance is met
    waittime=$(wait_for_cron "$DB_DUMP_CRON" "$current_time" $last_run)
  elif [[ $DB_DUMP_BEGIN =~ ^\+(.*)$ ]]; then
    waittime=$(( ${BASH_REMATCH[1]} * 60 ))
    target_time=$(($current_time + $waittime))
  else
    today=$(date +"%Y-%m-%d")
    target_time=$(date --date="${today} ${DB_DUMP_BEGIN}" +"%s")

    # numeric comparison: if that time already passed today, aim for tomorrow
    if [[ "$target_time" -lt "$current_time" ]]; then
      target_time=$(($target_time + 24*60*60))
    fi

    waittime=$(($target_time - $current_time))
  fi
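  # Examples of the three scheduling modes above (values illustrative):
  #   DB_DUMP_CRON="0 3 * * *"  -> wait_for_cron returns seconds until 03:00
  #   DB_DUMP_BEGIN=+25         -> start 25 minutes from now
  #   DB_DUMP_BEGIN=2330        -> start at 23:30 today, or tomorrow if that time
  #      has passed (exact formats depend on the date build, as warned above)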

  # If RUN_ONCE is set, don't wait
  if [ -z "${RUN_ONCE}" ]; then
    sleep $waittime
    last_run=$(date +"%s")
  fi

  # enter the loop
  exit_code=0
  while true; do
    # make sure the directory exists
    mkdir -p $TMPDIR
    do_dump
    [ $? -ne 0 ] && exit_code=1
    # we can have multiple targets
    for target in ${DB_DUMP_TARGET}; do
      backup_target ${target}
      [ $? -ne 0 ] && exit_code=1
    done
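    # DB_DUMP_TARGET is expanded unquoted above, so it may hold several
    # space-separated targets at once, e.g. (bucket name hypothetical):
    #   DB_DUMP_TARGET="/backup s3://my-bucket/nightly"
    # do_dump and backup_target are presumably defined in /functions.sh.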
    # remove lingering file
    /bin/rm ${TMPDIR}/${SOURCE}

    # wait, unless RUN_ONCE is set
    current_time=$(date +"%s")
    if [ -n "${RUN_ONCE}" ]; then
      exit $exit_code
    elif [ -n "${DB_DUMP_CRON}" ]; then
      waittime=$(wait_for_cron "${DB_DUMP_CRON}" "$current_time" $last_run)
    else
      current_time=$(date +"%s")
      # Calculate how long the previous backup took
      backup_time=$(($current_time - $target_time))
      # Calculate how many times the frequency time was passed during the previous backup.
      freq_time_count=$(($backup_time / $freq_time))
      # Increment the count by one because we want to wait at least the frequency time once.
      freq_time_count_to_add=$(($freq_time_count + 1))
      # Calculate the extra time to add to the previous target time
      extra_time=$(($freq_time_count_to_add*$freq_time))
      # Calculate the new target time needed for the next calculation
      target_time=$(($target_time + $extra_time))
      # Calculate the wait time
      waittime=$(($target_time - $current_time))
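      # Worked example (illustrative numbers): freq_time=3600 (DB_DUMP_FREQ=60)
      # and a dump finishing 5000s after target_time gives freq_time_count=1,
      # so 2*3600s are added to target_time and waittime becomes 2200s: the
      # schedule realigns to its original grid instead of drifting.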
    fi
    sleep $waittime
    last_run=$(date +"%s")
  done
fi