-#!/bin/sh
+#!/bin/bash

-/mysql-backup $@
+. /functions.sh
+
+if [[ -n "$DB_DUMP_DEBUG" ]]; then
+  set -x
+fi
+
+# get all variables from environment variables or files (e.g. VARIABLE_NAME_FILE)
+# (setting defaults happens here, too)
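+# file_env is defined in functions.sh (not shown here); it is assumed to follow
+# the usual Docker pattern: use $VAR if set, otherwise read the file named by
+# $VAR_FILE (e.g. DB_PASS_FILE=/run/secrets/db_pass), otherwise fall back to
+# the optional second argument as the default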
+file_env "DB_SERVER"
+file_env "DB_PORT"
+file_env "DB_USER"
+file_env "DB_PASS"
+file_env "DB_NAMES"
+file_env "DB_NAMES_EXCLUDE"
+
+file_env "DB_DUMP_FREQ" "1440"
+file_env "DB_DUMP_BEGIN" "+0"
+file_env "DB_DUMP_DEBUG"
+file_env "DB_DUMP_TARGET" "/backup"
+file_env "DB_DUMP_BY_SCHEMA"
+file_env "DB_DUMP_KEEP_PERMISSIONS" "true"
+
+file_env "DB_RESTORE_TARGET"
+
+file_env "AWS_ENDPOINT_URL"
+file_env "AWS_ENDPOINT_OPT"
+file_env "AWS_CLI_OPTS"
+file_env "AWS_CLI_S3_CP_OPTS"
+file_env "AWS_ACCESS_KEY_ID"
+file_env "AWS_SECRET_ACCESS_KEY"
+file_env "AWS_DEFAULT_REGION"
+
+file_env "SMB_USER"
+file_env "SMB_PASS"
+
+file_env "TMP_PATH" "/tmp"
+
+file_env "COMPRESSION" "gzip"
+
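+# re-check debug mode: DB_DUMP_DEBUG may only just have been set via a
+# DB_DUMP_DEBUG_FILE file by file_env above, so the first check could have missed it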
+if [[ -n "$DB_DUMP_DEBUG" ]]; then
+  set -x
+fi
+
+# ensure it is defined
+MYSQLDUMP_OPTS=${MYSQLDUMP_OPTS:-}
+
+# login credentials
+if [ -n "${DB_USER}" ]; then
+  DBUSER="-u${DB_USER}"
+else
+  DBUSER=
+fi
+if [ -n "${DB_PASS}" ]; then
+  DBPASS="-p${DB_PASS}"
+else
+  DBPASS=
+fi
+
+# database server
+if [ -z "${DB_SERVER}" ]; then
+  echo "DB_SERVER variable is required. Exiting."
+  exit 1
+fi
+# database port
+if [ -z "${DB_PORT}" ]; then
+  echo "DB_PORT not provided, defaulting to 3306"
+  DB_PORT=3306
+fi
+
+#
+# set compress and decompress commands
+COMPRESS=
+UNCOMPRESS=
+case $COMPRESSION in
+  gzip)
+    COMPRESS="gzip"
+    UNCOMPRESS="gunzip"
+    EXTENSION="tgz"
+    ;;
+  bzip2)
+    COMPRESS="bzip2"
+    UNCOMPRESS="bzip2 -d"
+    EXTENSION="tbz2"
+    ;;
+  *)
+    echo "Unknown compression requested: $COMPRESSION" >&2
+    exit 1
+    ;;
+esac
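+# NOTE: the dump is assumed to be a compressed tar archive (the restore path
+# below pipes $UNCOMPRESS into tar), hence the tgz/tbz2 extensions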
+
+
+# temporary dump dir
+TMPDIR="${TMP_PATH}/backups"
+TMPRESTORE="${TMP_PATH}/restorefile"
+
+# this is global, so has to be set outside
+declare -A uri
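+# uri_parser (defined in functions.sh, not shown) is assumed to populate this
+# map with the pieces used below: schema, host, user, password, userdomain,
+# share, and sharepath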
+
+
+
+if [[ -n "$DB_RESTORE_TARGET" ]]; then
+  # Execute additional scripts for pre-restore processing. For example,
+  # uncompress a tarball that contains the tarballs for the SQL dump and a
+  # WordPress installation.
+  if [ -d /scripts.d/pre-restore/ ]; then
+    for i in /scripts.d/pre-restore/*.sh; do
+      if [ -x "$i" ]; then
+        DB_RESTORE_TARGET=${DB_RESTORE_TARGET} DB_DUMP_DEBUG=${DB_DUMP_DEBUG} $i
+      fi
+    done
+  fi
+  uri_parser ${DB_RESTORE_TARGET}
+  if [[ "${uri[schema]}" == "file" ]]; then
+    cp $DB_RESTORE_TARGET $TMPRESTORE 2>/dev/null
+  elif [[ "${uri[schema]}" == "s3" ]]; then
+    [[ -n "$AWS_ENDPOINT_URL" ]] && AWS_ENDPOINT_OPT="--endpoint-url $AWS_ENDPOINT_URL"
+    aws ${AWS_CLI_OPTS} ${AWS_ENDPOINT_OPT} s3 cp ${AWS_CLI_S3_CP_OPTS} "${DB_RESTORE_TARGET}" $TMPRESTORE
+  elif [[ "${uri[schema]}" == "smb" ]]; then
+    if [[ -n "$SMB_USER" ]]; then
+      UPASSARG="-U"
+      UPASS="${SMB_USER}%${SMB_PASS}"
+    elif [[ -n "${uri[user]}" ]]; then
+      UPASSARG="-U"
+      UPASS="${uri[user]}%${uri[password]}"
+    else
+      UPASSARG=
+      UPASS=
+    fi
+    if [[ -n "${uri[userdomain]}" ]]; then
+      UDOM="-W ${uri[userdomain]}"
+    else
+      UDOM=
+    fi
+    smbclient -N "//${uri[host]}/${uri[share]}" ${UPASSARG} "${UPASS}" ${UDOM} -c "get ${uri[sharepath]} ${TMPRESTORE}"
+  fi
+  # did we get a file?
+  if [[ -f "$TMPRESTORE" ]]; then
+    if [ "$SINGLE_DATABASE" = "true" ]; then
+      DBDATABASE="-D$DB_NAMES"
+    else
+      DBDATABASE=
+    fi
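+    # the restore file is expected to be a $COMPRESSION-compressed tar archive
+    # holding one or more per-schema .sql dumps (presumably the layout produced
+    # by do_dump in functions.sh)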
+    workdir="${TMP_PATH}/restore.$$"
+    rm -rf $workdir
+    mkdir -p $workdir
+    $UNCOMPRESS < $TMPRESTORE | tar -C $workdir -xvf -
+    RESTORE_OPTS=${RESTORE_OPTS:-}
+    # If there are multiple schemas in the archive (e.g. DB_DUMP_BY_SCHEMA was used) and DB_NAMES is set,
+    # restore only the required databases
+    if [[ $(ls -1q $workdir/* | wc -l) -gt 1 ]] && [[ -n "$DB_NAMES" ]]; then
+      for onedb in $DB_NAMES; do
+        echo "Restoring $onedb from" $workdir/$onedb*
+        # /!\ If one schema's name is a prefix of another's, the glob below also
+        # matches the other schema's dump, so that dump gets executed multiple times
+        cat $workdir/$onedb* | mysql $RESTORE_OPTS -h $DB_SERVER -P $DB_PORT $DBUSER $DBPASS
+      done
+    else
+      cat $workdir/* | mysql $RESTORE_OPTS -h $DB_SERVER -P $DB_PORT $DBUSER $DBPASS $DBDATABASE
+    fi
+    rm -rf $workdir
+    /bin/rm -f $TMPRESTORE
+  else
+    echo "Could not find restore file $DB_RESTORE_TARGET"
+    exit 1
+  fi
+  # Execute additional scripts for post-restore processing. For example,
+  # uncompress a tarball that contains the files of a WordPress installation
+  if [ -d /scripts.d/post-restore/ ]; then
+    for i in /scripts.d/post-restore/*.sh; do
+      if [ -x "$i" ]; then
+        DB_RESTORE_TARGET=${DB_RESTORE_TARGET} DB_DUMP_DEBUG=${DB_DUMP_DEBUG} $i
+      fi
+    done
+  fi
+else
+  # wait for the next time to start a backup
+  # for debugging
+  echo "Starting at $(date)"
+  last_run=0
+  current_time=$(date +"%s")
+  freq_time=$(($DB_DUMP_FREQ*60))
+  # get the begin time on our date
+  # REMEMBER: we are using the basic date package in alpine
+  # could be a delay in minutes or an absolute time of day
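+  # e.g. DB_DUMP_BEGIN=+25 waits 25 minutes from startup, while an absolute
+  # time of day such as DB_DUMP_BEGIN="23:30" waits until the next 23:30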
+  if [ -n "$DB_DUMP_CRON" ]; then
+    # calculate how long until the next cron instance is met
+    waittime=$(wait_for_cron "$DB_DUMP_CRON" "$current_time" $last_run)
+  elif [[ $DB_DUMP_BEGIN =~ ^\+(.*)$ ]]; then
+    waittime=$(( ${BASH_REMATCH[1]} * 60 ))
+    target_time=$(($current_time + $waittime))
+  else
+    today=$(date +"%Y-%m-%d")
+    target_time=$(date --date="${today} ${DB_DUMP_BEGIN}" +"%s")
+
+    if [ "$target_time" -lt "$current_time" ]; then
+      target_time=$(($target_time + 24*60*60))
+    fi
+
+    waittime=$(($target_time - $current_time))
+  fi
+
+  # If RUN_ONCE is set, don't wait
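+  # (RUN_ONCE also causes the loop below to exit after the first backup completes)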
+  if [ -z "${RUN_ONCE}" ]; then
+    sleep $waittime
+    last_run=$(date +"%s")
+  fi
+
+  # enter the loop
+  exit_code=0
+  while true; do
+    # make sure the directory exists
+    mkdir -p $TMPDIR
+    do_dump
+    [ $? -ne 0 ] && exit_code=1
+    # we can have multiple targets
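+    # space-separated, e.g. (hypothetical values):
+    #   DB_DUMP_TARGET="/backup s3://my-bucket/mysql smb://host/share"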
+    for target in ${DB_DUMP_TARGET}; do
+      backup_target ${target}
+      [ $? -ne 0 ] && exit_code=1
+    done
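+    # ${SOURCE} is assumed to be set by do_dump to the filename of the dump it just wrote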
+    # remove lingering file
+    /bin/rm ${TMPDIR}/${SOURCE}
+
+    # wait, unless RUN_ONCE is set
+    current_time=$(date +"%s")
+    if [ -n "${RUN_ONCE}" ]; then
+      exit $exit_code
+    elif [ -n "${DB_DUMP_CRON}" ]; then
+      waittime=$(wait_for_cron "${DB_DUMP_CRON}" "$current_time" $last_run)
+    else
+      current_time=$(date +"%s")
+      # Calculate how long the previous backup took
+      backup_time=$(($current_time - $target_time))
+      # Calculate how many times the frequency time was passed during the previous backup.
+      freq_time_count=$(($backup_time / $freq_time))
+      # Increment the count by one because we want to wait at least the frequency time once.
+      freq_time_count_to_add=$(($freq_time_count + 1))
+      # Calculate the extra time to add to the previous target time
+      extra_time=$(($freq_time_count_to_add*$freq_time))
+      # Calculate the new target time needed for the next calculation
+      target_time=$(($target_time + $extra_time))
+      # Calculate the wait time
+      waittime=$(($target_time - $current_time))
+    fi
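+    # Worked example (hypothetical numbers): freq_time=3600s and the dump overran
+    # its slot by 5000s. Then freq_time_count=1, extra_time=2*3600=7200s, the new
+    # target is 7200s after the old one, and waittime=7200-5000=2200s from now.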
+    sleep $waittime
+    last_run=$(date +"%s")
+  done
+fi