Merge "Backup/restore enhancements"
This commit is contained in:
commit
a7df3d89bb
367
helm-toolkit/templates/scripts/db-backup-restore/_backup_main.sh.tpl
Executable file
367
helm-toolkit/templates/scripts/db-backup-restore/_backup_main.sh.tpl
Executable file
@ -0,0 +1,367 @@
|
||||
{{- define "helm-toolkit.scripts.db-backup-restore.backup_main" }}
|
||||
#!/bin/bash
|
||||
|
||||
# This file contains a database backup framework which database scripts
|
||||
# can use to perform a backup. The idea here is that the database-specific
|
||||
# functions will be implemented by the various databases using this script
|
||||
# (like mariadb, postgresql or etcd for example). The database-specific
|
||||
# script will need to first "source" this file like this:
|
||||
# source /tmp/backup_main.sh
|
||||
#
|
||||
# Then the script should call the main backup function (backup_databases):
|
||||
# backup_databases
|
||||
#
|
||||
# No arguments required. However, the framework will require the
|
||||
# following variables to be exported:
|
||||
#
|
||||
# export DB_NAMESPACE Namespace where the database(s) reside
|
||||
# export DB_NAME Name of the database system
|
||||
# export LOCAL_DAYS_TO_KEEP Number of days to keep the local backups
|
||||
# export REMOTE_DAYS_TO_KEEP Number of days to keep the remote backups
|
||||
# export ARCHIVE_DIR Local location where the backup tarballs should
|
||||
# be stored. (full directory path)
|
||||
# export REMOTE_BACKUP_ENABLED "true" if remote backup enabled; false
|
||||
# otherwise
|
||||
# export CONTAINER_NAME Name of the container on the RGW to store
|
||||
# the backup tarball.
|
||||
# export STORAGE_POLICY Name of the storage policy defined on the
|
||||
# RGW which is intended to store backups.
|
||||
# RGW access variables:
|
||||
# export OS_REGION_NAME Name of the region the RGW resides in
|
||||
# export OS_AUTH_URL Keystone URL associated with the RGW
|
||||
# export OS_PROJECT_NAME Name of the project associated with the
|
||||
# keystone user
|
||||
# export OS_USERNAME Name of the keystone user
|
||||
# export OS_PASSWORD Password of the keystone user
|
||||
# export OS_USER_DOMAIN_NAME Keystone domain the project belongs to
|
||||
# export OS_PROJECT_DOMAIN_NAME Keystone domain the user belongs to
|
||||
# export OS_IDENTITY_API_VERSION Keystone API version to use
|
||||
#
|
||||
# The following variables are optional:
|
||||
# export RGW_TIMEOUT Number of seconds to wait for the
|
||||
# connection to the RGW to be available
|
||||
# when sending a backup to the RGW. Default
|
||||
# is 1800 (30 minutes).
|
||||
#
|
||||
# The database-specific functions that need to be implemented are:
|
||||
# dump_databases_to_directory <directory> <err_logfile>
|
||||
# where:
|
||||
# <directory> is the full directory path to dump the database files
|
||||
# into. This is a temporary directory for this backup only.
|
||||
# <err_logfile> is the full directory path where error logs are to be
|
||||
# written by the application.
|
||||
# returns: 0 if no errors; 1 if any errors occurred
|
||||
#
|
||||
# This function is expected to dump the database file(s) to the specified
|
||||
# directory path. If this function completes successfully (returns 0), the
|
||||
# framework will automatically tar/zip the files in that directory and
|
||||
# name the tarball appropriately according to the proper conventions.
|
||||
#
|
||||
# The functions in this file will take care of:
|
||||
# 1) Calling "dump_databases_to_directory" and then compressing the files,
|
||||
# naming the tarball properly, and then storing it locally at the specified
|
||||
# local directory.
|
||||
# 2) Sending the tarball built to the remote gateway, to be stored in the
|
||||
# container configured to store database backups.
|
||||
# 3) Removing local backup tarballs which are older than the number of days
|
||||
# specified by the "LOCAL_DAYS_TO_KEEP" variable.
|
||||
# 4) Removing remote backup tarballs (from the remote gateway) which are older
|
||||
# than the number of days specified by the "REMOTE_DAYS_TO_KEEP" variable.
|
||||
#
|
||||
|
||||
# Note: not using set -e in this script because more elaborate error handling
|
||||
# is needed.
|
||||
set -x
|
||||
|
||||
# Log a fatal backup error, clean up temporary artifacts, and exit.
#   $1 - error message to log
#   $2 - exit code (optional; defaults to 1). Several callers pass only
#        a message; previously "exit $ERRCODE" then ran with an empty
#        code and exited with the status of the preceding rm (usually
#        0), silently masking the failure.
# Globals read: DB_NAME, ERR_LOG_FILE, TMP_DIR.
log_backup_error_exit() {
  MSG=$1
  ERRCODE=${2:-1}
  log ERROR "${DB_NAME}_backup" "${MSG}"
  rm -f $ERR_LOG_FILE
  rm -rf $TMP_DIR
  exit $ERRCODE
}
|
||||
|
||||
# Log a timestamped message to stdout or append it to a file.
#   $1 - log level (e.g. INFO, WARN, ERROR)
#   $2 - service name (e.g. "${DB_NAME}_backup")
#   $3 - message text
#   $4 - destination file (optional; stdout when omitted)
# TODO: This could be converted into a mail alert or an alert sent to
# a monitoring system.
log() {
  LEVEL=$1
  SERVICE=$2
  MSG=$3
  DEST=$4
  DATE=$(date +"%m-%d-%y %H:%M:%S")
  if [[ -z "$DEST" ]]; then
    echo "${DATE} ${LEVEL}: $(hostname) ${SERVICE}: ${MSG}"
  else
    # Quote the destination so a path containing spaces cannot break
    # (or redirect away from) the intended log file.
    echo "${DATE} ${LEVEL}: $(hostname) ${SERVICE}: ${MSG}" >>"$DEST"
  fi
}
|
||||
|
||||
#Get the day delta since the archive file backup
|
||||
# Print the age, in seconds, of the timestamp in $1 (any format that
# "date --date" accepts). Prints 0 when the timestamp is unparseable
# or lies in the future (never a negative value).
seconds_difference() {
  ARCHIVE_DATE=$( date --date="$1" +%s )
  if [[ $? -ne 0 ]]; then
    # Unparseable date: report age 0 and stop. Previously execution
    # fell through and computed CURRENT_DATE minus an empty string,
    # echoing the current epoch time instead of 0 — which made the
    # pruning callers treat the archive as ancient and delete it.
    echo 0
    return
  fi
  CURRENT_DATE=$( date +%s )
  SECOND_DELTA=$(($CURRENT_DATE-$ARCHIVE_DATE))
  if [[ "$SECOND_DELTA" -lt 0 ]]; then
    SECOND_DELTA=0
  fi
  echo $SECOND_DELTA
}
|
||||
|
||||
# Send the specified tarball file at the specified filepath to the
|
||||
# remote gateway.
|
||||
# Send the tarball named $2, located in directory $1, to the remote
# gateway (RGW) via the OpenStack/Swift CLI. Creates the target
# container (honoring STORAGE_POLICY) if it does not already exist.
# Returns: 0 on success; 2 when the RGW/keystone appears temporarily
# unreachable (caller may retry); 1 on any other failure.
# Globals read: CONTAINER_NAME, STORAGE_POLICY, DB_NAME, and the OS_*
# credentials consumed by the openstack/swift/curl clients.
send_to_remote_server() {
  FILEPATH=$1
  FILE=$2

  # Grab the list of containers on the remote site
  RESULT=$(openstack container list 2>&1)

  if [[ $? -eq 0 ]]; then
    # NOTE(review): substring match — a container whose name merely
    # contains $CONTAINER_NAME also matches; confirm names are unique.
    echo $RESULT | grep $CONTAINER_NAME
    if [[ $? -ne 0 ]]; then
      # Container is missing; create it via the raw Swift API.
      # Find the swift URL from the keystone endpoint list
      SWIFT_URL=$(openstack catalog list -f value | grep -A5 swift | grep public | awk '{print $2}')

      # Get a token from keystone
      TOKEN=$(openstack token issue -f value -c id)

      # Create the container
      RES_FILE=$(mktemp -p /tmp)
      # NOTE(review): "2>&1 > $RES_FILE" sends curl's stderr to the
      # terminal, not into $RES_FILE; "> $RES_FILE 2>&1" may have been
      # intended — confirm.
      curl -g -i -X PUT ${SWIFT_URL}/${CONTAINER_NAME} \
          -H "X-Auth-Token: ${TOKEN}" \
          -H "X-Storage-Policy: ${STORAGE_POLICY}" 2>&1 > $RES_FILE

      # Fail when curl itself errored or the captured HTTP status
      # line reports a 4xx/5xx code.
      if [[ $? -ne 0 || $(grep "HTTP" $RES_FILE | awk '{print $2}') -ge 400 ]]; then
        log ERROR "${DB_NAME}_backup" "Error creating container ${CONTAINER_NAME}"
        cat $RES_FILE
        rm -f $RES_FILE
        return 1
      fi
      rm -f $RES_FILE

      # Verify the container is actually visible after creation.
      swift stat $CONTAINER_NAME
      if [[ $? -ne 0 ]]; then
        log ERROR "${DB_NAME}_backup" "Error retrieving container ${CONTAINER_NAME} details after creation."
        return 1
      fi
    fi
  else
    # Container listing failed: classify the error from the captured
    # client output to decide whether a retry makes sense.
    echo $RESULT | grep "HTTP 401"
    if [[ $? -eq 0 ]]; then
      log ERROR "${DB_NAME}_backup" "Access denied by keystone: ${RESULT}"
      return 1
    else
      echo $RESULT | grep -E "ConnectionError|Failed to discover available identity versions"
      if [[ $? -eq 0 ]]; then
        log ERROR "${DB_NAME}_backup" "Could not reach the RGW: ${RESULT}"
        # In this case, keystone or the site/node may be temporarily down.
        # Return slightly different error code so the calling code can retry
        return 2
      else
        log ERROR "${DB_NAME}_backup" "Could not get container list: ${RESULT}"
        return 1
      fi
    fi
  fi

  # Create an object to store the file
  openstack object create --name $FILE $CONTAINER_NAME $FILEPATH/$FILE || log ERROR "${DB_NAME}_backup" "Cannot create container object ${FILE}!"
  # Confirm the uploaded object is retrievable before declaring success.
  openstack object show $CONTAINER_NAME $FILE
  if [[ $? -ne 0 ]]; then
    log ERROR "${DB_NAME}_backup" "Error retrieving container object $FILE after creation."
    return 1
  fi

  log INFO "${DB_NAME}_backup" "Created file $FILE in container $CONTAINER_NAME successfully."
  return 0
}
|
||||
|
||||
# This function attempts to store the built tarball to the remote gateway,
|
||||
# with built-in logic to handle error cases like:
|
||||
# 1) Network connectivity issues - retries for a specific amount of time
|
||||
# 2) Authorization errors - immediately logs an ERROR and exits
|
||||
# Store the tarball named $2 (located in directory $1) on the remote
# gateway, retrying for up to RGW_TIMEOUT seconds while the RGW is
# unreachable (send_to_remote_server return code 2). Authorization
# and other hard failures abort immediately.
#   $1 - directory containing the tarball
#   $2 - tarball file name
# Returns 0 on success, 1 on failure.
store_backup_remotely() {
  FILEPATH=$1
  FILE=$2

  # If the RGW_TIMEOUT has already been set, use that value, otherwise give it
  # a default value.
  if [[ -z $RGW_TIMEOUT ]]; then
    RGW_TIMEOUT=1800
  fi

  ERROR_SEEN=false
  DONE=false
  TIMEOUT_EXP=$(( $(date +%s) + $RGW_TIMEOUT ))
  while [[ $DONE == "false" ]]; do
    # Store the new archive to the remote backup storage facility.
    send_to_remote_server $FILEPATH $FILE
    # Capture the status exactly once. The previous code tested $?
    # again in the elif, which examined the exit status of the first
    # [[ ]] test rather than of send_to_remote_server, so the retry
    # branch could never trigger.
    SEND_RESULT=$?

    # Check if successful
    if [[ $SEND_RESULT -eq 0 ]]; then
      log INFO "${DB_NAME}_backup" "Backup file ${FILE} successfully sent to RGW."
      DONE=true
    elif [[ $SEND_RESULT -eq 2 ]]; then
      # Temporary failure occurred. We need to retry if we have not timed out
      log WARN "${DB_NAME}_backup" "Backup file ${FILE} could not be sent to RGW due to connection issue."
      DELTA=$(( TIMEOUT_EXP - $(date +%s) ))
      if [[ $DELTA -lt 0 ]]; then
        DONE=true
        log ERROR "${DB_NAME}_backup" "Timed out waiting for RGW to become available."
        ERROR_SEEN=true
      else
        log INFO "${DB_NAME}_backup" "Sleeping 30 seconds waiting for RGW to become available..."
        sleep 30
        log INFO "${DB_NAME}_backup" "Retrying..."
      fi
    else
      # Hard failure (e.g. authorization): do not retry.
      log ERROR "${DB_NAME}_backup" "Backup file ${FILE} could not be sent to the RGW."
      ERROR_SEEN=true
      DONE=true
    fi
  done

  if [[ $ERROR_SEEN == "true" ]]; then
    log ERROR "${DB_NAME}_backup" "Errors encountered. Exiting."
    return 1
  fi
  return 0
}
|
||||
|
||||
# Delete local backup tarballs in ARCHIVE_DIR older than
# LOCAL_DAYS_TO_KEEP days. The archive date is parsed from the file
# name (convention: <db>.<namespace>.all.<date>.tar.gz — 4th
# dot-separated field). Deletion failures are logged but do not abort,
# so the rest of the backup flow can still run.
remove_old_local_archives() {
  log INFO "${DB_NAME}_backup" "Deleting backups older than ${LOCAL_DAYS_TO_KEEP} days"
  if [[ -d $ARCHIVE_DIR ]]; then
    # Iterate with a glob instead of parsing ls output: paths with
    # spaces stay intact and an empty directory produces no ls error.
    for ARCHIVE_FILE in "$ARCHIVE_DIR"/*.gz; do
      # Skip the literal pattern when the glob matched nothing.
      [[ -e "$ARCHIVE_FILE" ]] || continue
      ARCHIVE_DATE=$( echo $ARCHIVE_FILE | awk -F/ '{print $NF}' | cut -d'.' -f 4)
      if [[ "$(seconds_difference $ARCHIVE_DATE)" -gt "$(($LOCAL_DAYS_TO_KEEP*86400))" ]]; then
        log INFO "${DB_NAME}_backup" "Deleting file $ARCHIVE_FILE."
        # These are plain files; -f suffices (safer than -rf).
        rm -f "$ARCHIVE_FILE"
        if [[ $? -ne 0 ]]; then
          # Log error but don't exit so we can finish the script
          # because at this point we haven't sent backup to RGW yet
          log ERROR "${DB_NAME}_backup" "Cannot remove ${ARCHIVE_FILE}"
        fi
      else
        log INFO "${DB_NAME}_backup" "Keeping file ${ARCHIVE_FILE}."
      fi
    done
  fi
}
|
||||
|
||||
# Delete remote backup tarballs (objects in CONTAINER_NAME on the RGW)
# older than REMOTE_DAYS_TO_KEEP days. The archive date is parsed out
# of the object name using the same <db>.<ns>.all.<date>.tar.gz
# convention as remove_old_local_archives. Exits via
# log_backup_error_exit when the RGW cannot be queried or an object
# cannot be deleted.
remove_old_remote_archives() {
  log INFO "${DB_NAME}_backup" "Deleting backups older than ${REMOTE_DAYS_TO_KEEP} days"
  BACKUP_FILES=$(mktemp -p /tmp)
  DB_BACKUP_FILES=$(mktemp -p /tmp)

  openstack object list $CONTAINER_NAME > $BACKUP_FILES
  if [[ $? -ne 0 ]]; then
    log_backup_error_exit "Could not obtain a list of current backup files in the RGW" 1
  fi

  # Filter out other types of backup files
  # (keep only objects for this DB_NAME in this DB_NAMESPACE).
  cat $BACKUP_FILES | grep $DB_NAME | grep $DB_NAMESPACE | awk '{print $2}' > $DB_BACKUP_FILES

  for ARCHIVE_FILE in $(cat $DB_BACKUP_FILES); do
    # 4th dot-separated field of the object name is the timestamp.
    ARCHIVE_DATE=$( echo $ARCHIVE_FILE | awk -F/ '{print $NF}' | cut -d'.' -f 4)
    if [[ "$(seconds_difference ${ARCHIVE_DATE})" -gt "$((${REMOTE_DAYS_TO_KEEP}*86400))" ]]; then
      log INFO "${DB_NAME}_backup" "Deleting file ${ARCHIVE_FILE} from the RGW"
      openstack object delete $CONTAINER_NAME $ARCHIVE_FILE || log_backup_error_exit "Cannot delete container object ${ARCHIVE_FILE}!" 1
    fi
  done

  # Cleanup now that we're done.
  rm -f $BACKUP_FILES $DB_BACKUP_FILES
}
|
||||
|
||||
# Main function to backup the databases. Calling functions need to supply:
|
||||
# 1) The directory where the final backup will be kept after it is compressed.
|
||||
# 2) A temporary directory to use for placing database files to be compressed.
|
||||
# Note: this temp directory will be deleted after backup is done.
|
||||
# Main backup driver. Dumps the database(s) via the database-specific
# hook dump_databases_to_directory, tars the result into ARCHIVE_DIR,
# prunes old local archives, and — when REMOTE_BACKUP_ENABLED is
# "true" — ships the tarball to the RGW and prunes old remote
# archives. Takes no arguments; relies on the exported variables
# documented at the top of this file. Exits via log_backup_error_exit
# on any failure.
backup_databases() {
  # Create necessary directories if they do not exist.
  mkdir -p $ARCHIVE_DIR || log_backup_error_exit "Cannot create directory ${ARCHIVE_DIR}!"
  export TMP_DIR=$(mktemp -d) || log_backup_error_exit "Cannot create temp directory!"

  # Create temporary log file
  export ERR_LOG_FILE=$(mktemp -p /tmp) || log_backup_error_exit "Cannot create log file!"

  # It is expected that this function will dump the database files to the $TMP_DIR
  dump_databases_to_directory $TMP_DIR $ERR_LOG_FILE

  # If successful, there should be at least one file in the TMP_DIR
  if [[ $? -ne 0 || $(ls $TMP_DIR | wc -w) -eq 0 ]]; then
    cat $ERR_LOG_FILE
    # NOTE(review): no exit code argument is passed here (or on the
    # error exits below), so log_backup_error_exit runs "exit" with an
    # empty ERRCODE — confirm a non-zero exit status is still produced.
    log_backup_error_exit "Backup of the ${DB_NAME} database failed and needs attention."
  fi

  log INFO "${DB_NAME}_backup" "Databases dumped successfully. Creating tarball..."

  # Archive naming convention: <db>.<namespace>.all.<timestamp>.tar.gz.
  # The pruning functions parse the timestamp back out of field 4.
  NOW=$(date +"%Y-%m-%dT%H:%M:%SZ")
  TARBALL_FILE="${DB_NAME}.${DB_NAMESPACE}.all.${NOW}.tar.gz"

  cd $TMP_DIR || log_backup_error_exit "Cannot change to directory $TMP_DIR"

  #Archive the current database files
  tar zcvf $ARCHIVE_DIR/$TARBALL_FILE *
  if [[ $? -ne 0 ]]; then
    log_backup_error_exit "Backup tarball could not be created."
  fi

  # Get the size of the file (5th column of ls -l output).
  ARCHIVE_SIZE=$(ls -l $ARCHIVE_DIR/$TARBALL_FILE | awk '{print $5}')

  log INFO "${DB_NAME}_backup" "Tarball $TARBALL_FILE created successfully."

  cd $ARCHIVE_DIR

  # Remove the temporary directory and files as they are no longer needed.
  rm -rf $TMP_DIR
  rm -f $ERR_LOG_FILE

  #Only delete the old archive after a successful archive
  if [[ "$LOCAL_DAYS_TO_KEEP" -gt 0 ]]; then
    remove_old_local_archives
  fi

  # REMOTE_BACKUP_ENABLED holds the literal string "true" or "false"
  # and is executed as a command here, so its value selects the branch.
  if $REMOTE_BACKUP_ENABLED; then
    store_backup_remotely $ARCHIVE_DIR $TARBALL_FILE
    if [[ $? -ne 0 ]]; then
      log_backup_error_exit "Backup could not be sent to remote RGW."
    fi

    #Only delete the old archive after a successful archive
    if [[ "$REMOTE_DAYS_TO_KEEP" -gt 0 ]]; then
      remove_old_remote_archives
    fi

    # Turn off trace just for a clearer printout of backup status - for manual backups, mainly.
    set +x
    echo "=================================================================="
    echo "Local backup and backup to remote RGW successful!"
    echo "Backup archive name: $TARBALL_FILE"
    echo "Backup archive size: $ARCHIVE_SIZE"
    echo "=================================================================="
    set -x
  else
    # Remote backup is not enabled. This is ok; at least we have a local backup.
    log INFO "${DB_NAME}_backup" "Skipping remote backup, as it is not enabled."

    # Turn off trace just for a clearer printout of backup status - for manual backups, mainly.
    set +x
    echo "=================================================================="
    echo "Local backup successful!"
    echo "Backup archive name: $TARBALL_FILE"
    echo "Backup archive size: $ARCHIVE_SIZE"
    echo "=================================================================="
    set -x
  fi
}
|
||||
{{- end }}
|
375
helm-toolkit/templates/scripts/db-backup-restore/_restore_main.sh.tpl
Executable file
375
helm-toolkit/templates/scripts/db-backup-restore/_restore_main.sh.tpl
Executable file
@ -0,0 +1,375 @@
|
||||
{{- define "helm-toolkit.scripts.db-backup-restore.restore_main" }}
|
||||
#!/bin/bash
|
||||
|
||||
# This file contains a database restore framework which database scripts
|
||||
# can use to perform a restore. The idea here is that the database-specific
|
||||
# functions will be implemented by the various databases using this script
|
||||
# (like mariadb, postgresql or etcd for example). The database-specific
|
||||
# script will need to first "source" this file like this:
|
||||
# source /tmp/restore_main.sh
|
||||
#
|
||||
# Then the script should call the main CLI function (cli_main):
|
||||
# cli_main <arg_list>
|
||||
# where:
|
||||
# <arg_list> is the list of arguments given by the user
|
||||
#
|
||||
# The framework will require the following variables to be exported:
|
||||
#
|
||||
# export DB_NAMESPACE Namespace where the database(s) reside
|
||||
# export DB_NAME Name of the database system
|
||||
# export ARCHIVE_DIR Location where the backup tarballs should
|
||||
# be stored. (full directory path which
|
||||
# should already exist)
|
||||
# export CONTAINER_NAME Name of the container on the RGW where
|
||||
# the backups are stored.
|
||||
# RGW access variables:
|
||||
# export OS_REGION_NAME Name of the region the RGW resides in
|
||||
# export OS_AUTH_URL Keystone URL associated with the RGW
|
||||
# export OS_PROJECT_NAME Name of the project associated with the
|
||||
# keystone user
|
||||
# export OS_USERNAME Name of the keystone user
|
||||
# export OS_PASSWORD Password of the keystone user
|
||||
# export OS_USER_DOMAIN_NAME Keystone domain the project belongs to
|
||||
# export OS_PROJECT_DOMAIN_NAME Keystone domain the user belongs to
|
||||
# export OS_IDENTITY_API_VERSION Keystone API version to use
|
||||
#
|
||||
# The database-specific functions that need to be implemented are:
|
||||
# get_databases
|
||||
# where:
|
||||
# <tmp_dir> is the full directory path where the decompressed
|
||||
# database files reside
|
||||
# <db_file> is the full path of the file to write the database
|
||||
# names into, one database per line
|
||||
# returns: 0 if no errors; 1 if any errors occurred
|
||||
#
|
||||
# This function is expected to extract the database names from the
|
||||
# uncompressed database files found in the given "tmp_dir", which is
|
||||
# the staging directory for database restore. The database names
|
||||
# should be written to the given "db_file", one database name per
|
||||
# line.
|
||||
#
|
||||
# restore_single_db
|
||||
# where:
|
||||
# <db_name> is the name of the database to be restored
|
||||
# <tmp_dir> is the full directory path where the decompressed
|
||||
# database files reside
|
||||
# returns: 0 if no errors; 1 if any errors occurred
|
||||
#
|
||||
# This function is expected to restore the database given as "db_name"
|
||||
# using the database files located in the "tmp_dir". The framework
|
||||
# will delete the "tmp_dir" and the files in it after the restore is
|
||||
# complete.
|
||||
#
|
||||
# restore_all_dbs
|
||||
# where:
|
||||
# <tmp_dir> is the full directory path where the decompressed
|
||||
# database files reside
|
||||
# returns: 0 if no errors; 1 if any errors occurred
|
||||
#
|
||||
# This function is expected to restore all of the databases which
|
||||
# are backed up in the database files located in the "tmp_dir". The
|
||||
# framework will delete the "tmp_dir" and the files in it after the
|
||||
# restore is complete.
|
||||
#
|
||||
# The functions in this file will take care of:
|
||||
# 1) The CLI parameter parsing for the arguments passed in by the user.
|
||||
# 2) The listing of either local or remote archive files at the request
|
||||
# of the user.
|
||||
# 3) The retrieval/download of an archive file located either in the local
|
||||
# file system or remotely stored on an RGW.
|
||||
# 4) Calling either "restore_single_db" or "restore_all_dbs" when the user
|
||||
# chooses to restore a database or all databases.
|
||||
# 5) The framework will call "get_databases" when it needs a list of
|
||||
# databases when the user requests a database list or when the user
|
||||
# requests to restore a single database (to ensure it exists in the
|
||||
# archive).
|
||||
#
|
||||
|
||||
export LOG_FILE=/tmp/dbrestore.log
|
||||
|
||||
# Print the restore CLI help text, then clean up temp files and exit
# with the status given in $1.
usage() {
  rc=$1
  cat <<'USAGE_EOF'
Usage:
Restore command options
=============================
help
list_archives [remote]
list_databases <archive_filename> [remote]
restore <archive_filename> <db_specifier> [remote]
 where <db_specifier> = <dbname> | ALL
USAGE_EOF
  clean_and_exit $rc ""
}
|
||||
|
||||
#Exit cleanly with some message and return code
|
||||
# Remove the temporary work directory/file and exit.
#   $1 - exit status
#   $2 - optional message printed to stdout before exiting
# Globals read: TMP_DIR, DB_FILE.
clean_and_exit() {
  RETCODE=$1
  MSG=$2

  # Clean/remove temporary directories/files
  rm -rf $TMP_DIR
  rm -f $DB_FILE

  # -n replaces the archaic x-prefix comparison; quoting $MSG prevents
  # word splitting and accidental glob expansion of message text.
  if [[ -n "${MSG}" ]]; then
    echo "$MSG"
  fi
  exit $RETCODE
}
|
||||
|
||||
# Retrieve a list of archives from the RGW.
|
||||
# Retrieve a list of archives from the RGW.
# Writes the matching object names (this DB_NAME within this
# DB_NAMESPACE) into $TMP_DIR/archive_list, one per line.
# Returns: 0 on success; 2 when the RGW appears temporarily
# unreachable (caller may retry); 1 on any other failure.
retrieve_remote_listing() {
  RESULT=$(openstack container show $CONTAINER_NAME 2>&1)
  if [[ $? -eq 0 ]]; then
    # Get the list, ensuring that we only pick up the right kind of backups from the
    # requested namespace
    openstack object list $CONTAINER_NAME | grep $DB_NAME | grep $DB_NAMESPACE | awk '{print $2}' > $TMP_DIR/archive_list
    # NOTE(review): $? here reflects awk, the last pipeline stage, so
    # a failed "openstack object list" is masked — confirm whether
    # PIPESTATUS should be checked instead.
    if [[ $? -ne 0 ]]; then
      echo "Container object listing could not be obtained."
      return 1
    else
      echo "Archive listing successfully retrieved."
    fi
  else
    # The container lookup failed: classify the error from the
    # captured client output.
    echo $RESULT | grep "HTTP 401"
    if [[ $? -eq 0 ]]; then
      echo "Could not access the container: ${RESULT}"
      return 1
    else
      echo $RESULT | grep "ConnectionError"
      if [[ $? -eq 0 ]]; then
        echo "Could not reach the RGW: ${RESULT}"
        # In this case, keystone or the site/node may be temporarily down.
        # Return slightly different error code so the calling code can retry
        return 2
      else
        echo "Container $CONTAINER_NAME does not exist: ${RESULT}"
        return 1
      fi
    fi
  fi
  return 0
}
|
||||
|
||||
# Retrieve a single archive from the RGW.
|
||||
# Retrieve a single archive from the RGW.
#   $1 - object/archive name; downloaded to $TMP_DIR/<name>
# Returns: 0 on success; 2 when the RGW appears temporarily
# unreachable (caller may retry); 1 on any other failure.
retrieve_remote_archive() {
  ARCHIVE=$1

  RESULT=$(openstack object save --file $TMP_DIR/$ARCHIVE $CONTAINER_NAME $ARCHIVE 2>&1)
  if [[ $? -ne 0 ]]; then
    # Classify the failure from the captured client output.
    echo $RESULT | grep "HTTP 401"
    if [[ $? -eq 0 ]]; then
      echo "Could not access the archive: ${RESULT}"
      return 1
    else
      echo $RESULT | grep "ConnectionError"
      if [[ $? -eq 0 ]]; then
        echo "Could not reach the RGW: ${RESULT}"
        # In this case, keystone or the site/node may be temporarily down.
        # Return slightly different error code so the calling code can retry
        return 2
      else
        echo "Archive ${ARCHIVE} could not be retrieved: ${RESULT}"
        return 1
      fi
    fi
  else
    echo "Archive $ARCHIVE successfully retrieved."
  fi
  return 0
}
|
||||
|
||||
# Display all archives
|
||||
# List available backup archives, then exit.
#   $1 - "remote" (case-insensitive) lists archives stored on the RGW;
#        empty lists local archives in ARCHIVE_DIR; anything else
#        prints usage and exits with status 1.
# Always terminates via clean_and_exit/usage.
list_archives() {
  REMOTE=$1

  if [[ "x${REMOTE^^}" == "xREMOTE" ]]; then
    retrieve_remote_listing
    if [[ $? -eq 0 && -e $TMP_DIR/archive_list ]]; then
      echo
      echo "All Archives from RGW Data Store"
      echo "=============================================="
      cat $TMP_DIR/archive_list
      clean_and_exit 0 ""
    else
      clean_and_exit 1 "ERROR: Archives could not be retrieved from the RGW."
    fi
  elif [[ "x${REMOTE}" == "x" ]]; then
    if [[ -d $ARCHIVE_DIR ]]; then
      archives=$(find $ARCHIVE_DIR/ -iname "*.gz" -print)
      echo
      echo "All Local Archives"
      echo "=============================================="
      for archive in $archives
      do
        # Print just the file name. The previous "cut -d '/' -f 8"
        # only produced output when ARCHIVE_DIR happened to sit at
        # exactly that directory depth; at any other depth the listing
        # printed blank (or wrong) names.
        echo "${archive##*/}"
      done
      clean_and_exit 0 ""
    else
      clean_and_exit 1 "ERROR: Local archive directory is not available."
    fi
  else
    usage 1
  fi
}
|
||||
|
||||
# Retrieve the archive from the desired location and decompress it into
|
||||
# the restore directory
|
||||
# Fetch archive $1 into TMP_DIR — from the RGW when $2 is "remote",
# from ARCHIVE_DIR when $2 is empty — then decompress it in place.
# Any other value for $2 prints usage. Exits via clean_and_exit/usage
# on failure.
get_archive() {
  ARCHIVE_FILE=$1
  REMOTE=$2

  case "x$REMOTE" in
    xremote)
      if ! retrieve_remote_archive $ARCHIVE_FILE; then
        clean_and_exit 1 "ERROR: Could not retrieve remote archive: $ARCHIVE_FILE"
      fi
      ;;
    x)
      # Local mode: stage a copy of the archive into TMP_DIR.
      if [[ -e $ARCHIVE_DIR/$ARCHIVE_FILE ]]; then
        if ! cp $ARCHIVE_DIR/$ARCHIVE_FILE $TMP_DIR/$ARCHIVE_FILE; then
          clean_and_exit 1 "ERROR: Could not copy local archive to restore directory."
        fi
      else
        clean_and_exit 1 "ERROR: Local archive file could not be found."
      fi
      ;;
    *)
      usage 1
      ;;
  esac

  echo "Decompressing archive $ARCHIVE_FILE..."
  cd $TMP_DIR
  if ! tar zxvf - < $TMP_DIR/$ARCHIVE_FILE 1>/dev/null; then
    clean_and_exit 1 "ERROR: Archive decompression failed."
  fi
}
|
||||
|
||||
# Display all databases from an archive
|
||||
# Print the databases contained in a backup archive.
#   $1 - archive file name
#   $2 - "remote" to fetch the archive from the RGW; empty for local
list_databases() {
  ARCHIVE_FILE=$1
  REMOTE=$2

  # Label the listing with the archive's origin.
  WHERE="local"
  if [[ "x${REMOTE}" != "x" ]]; then
    WHERE="remote"
  fi

  # Get the archive from the source location (local/remote)
  get_archive $ARCHIVE_FILE $REMOTE

  # The database-specific hook writes one database name per line
  # into the given file.
  if ! get_databases $TMP_DIR $DB_FILE; then
    clean_and_exit 1 "ERROR: Could not list databases."
  fi

  if [[ -f "$DB_FILE" ]]; then
    echo " "
    echo "Databases in the $WHERE archive $ARCHIVE_FILE"
    echo "================================================================================"
    cat $DB_FILE
  else
    echo "There is no database in the archive."
  fi
}
|
||||
|
||||
# Return 1 if the given database exists in the database file. 0 otherwise.
|
||||
# Check whether database $1 is listed in $DB_FILE.
# Returns 1 if the database exists in the file, 0 otherwise — note the
# inverted convention, which cli_main relies on.
database_exists() {
  DB=$1

  # Exact whole-line match (-x). The previous plain substring grep
  # wrongly reported e.g. "db" as present when only "mydb2" was in the
  # list; -q suppresses the matched line and -- protects names that
  # start with a dash.
  if grep -qx -- "${DB}" ${DB_FILE}; then
    return 1
  fi
  return 0
}
|
||||
|
||||
# This is the main CLI interpreter function
|
||||
# Main CLI entry point for the restore framework. Parses the user's
# arguments and dispatches to the list/restore operations (see usage()
# for the accepted commands).
#   $@ - CLI arguments supplied by the user
# Creates TMP_DIR (archive staging area) and DB_FILE (database name
# list) for the helper functions; both are removed by clean_and_exit
# on every exit path.
cli_main() {
  ARGS=("$@")

  # Create temp directory for a staging area to decompress files into
  export TMP_DIR=$(mktemp -d)

  # Create a temp file for storing list of databases (if needed)
  export DB_FILE=$(mktemp -p /tmp)

  if [[ ${#ARGS[@]} -gt 4 ]]; then
    usage 1
  elif [[ ${#ARGS[@]} -eq 1 ]]; then
    # One argument: "list_archives" (local) or "help".
    if [[ "${ARGS[0]}" == "list_archives" ]]; then
      list_archives
      clean_and_exit 0 ""
    elif [[ "${ARGS[0]}" == "help" ]]; then
      usage 0
    else
      usage 1
    fi
  elif [[ ${#ARGS[@]} -eq 2 ]]; then
    # Two arguments: "list_databases <archive>" or "list_archives remote".
    if [[ "${ARGS[0]}" == "list_databases" ]]; then
      list_databases ${ARGS[1]}
      clean_and_exit 0 ""
    elif [[ "${ARGS[0]}" == "list_archives" ]]; then
      list_archives ${ARGS[1]}
      clean_and_exit 0 ""
    else
      usage 1
    fi
  elif [[ ${#ARGS[@]} -eq 3 || ${#ARGS[@]} -eq 4 ]]; then
    if [[ "${ARGS[0]}" == "list_databases" ]]; then
      list_databases ${ARGS[1]} ${ARGS[2]}
      clean_and_exit 0 ""
    elif [[ "${ARGS[0]}" != "restore" ]]; then
      usage 1
    else
      # restore <archive> <db_specifier> [remote]
      ARCHIVE=${ARGS[1]}
      DB_SPEC=${ARGS[2]}
      REMOTE=""
      if [[ ${#ARGS[@]} -eq 4 ]]; then
        REMOTE=${ARGS[3]}
      fi

      #Get all the databases in that archive
      get_archive $ARCHIVE $REMOTE

      # "ALL" (any letter case) restores every database; any other
      # specifier names a single database.
      if [[ "$( echo $DB_SPEC | tr '[a-z]' '[A-Z]')" != "ALL" ]]; then
        # Expectation is that the database listing will be put into
        # the given file one database per line
        get_databases $TMP_DIR $DB_FILE
        if [[ "$?" -ne 0 ]]; then
          clean_and_exit 1 "ERROR: Could not get the list of databases to restore."
        fi

        #check if the requested database is available in the archive
        # NOTE: database_exists returns 1 when the database IS present
        # (inverted convention).
        database_exists $DB_SPEC
        if [[ $? -ne 1 ]]; then
          clean_and_exit 1 "ERROR: Database ${DB_SPEC} does not exist."
        fi

        echo "Restoring Database $DB_SPEC And Grants"
        restore_single_db $DB_SPEC $TMP_DIR
        if [[ "$?" -eq 0 ]]; then
          echo "Single database restored successfully."
        else
          clean_and_exit 1 "ERROR: Single database restore failed."
        fi
        echo "Tail ${LOG_FILE} for restore log."
        clean_and_exit 0 ""
      else
        echo "Restoring All The Databases. This could take a few minutes..."
        restore_all_dbs $TMP_DIR
        if [[ "$?" -eq 0 ]]; then
          echo "All databases restored successfully."
        else
          clean_and_exit 1 "ERROR: Database restore failed."
        fi
        clean_and_exit 0 "Tail ${LOG_FILE} for restore log."
      fi
    fi
  else
    usage 1
  fi

  # Fallback: should be unreachable given the branches above, but
  # guarantees cleanup happens.
  clean_and_exit 0 "Done"
}
|
||||
{{- end }}
|
@ -12,6 +12,9 @@
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
|
||||
# This is needed to get the postgresql admin password
|
||||
# Turn off tracing so the password doesn't get printed.
|
||||
set +x
|
||||
export PGPASSWORD=$(cat /etc/postgresql/admin_user.conf \
|
||||
| grep postgres | awk -F: '{print $5}')
|
||||
|
||||
@ -19,111 +22,45 @@ export PGPASSWORD=$(cat /etc/postgresql/admin_user.conf \
|
||||
# is needed.
|
||||
set -x
|
||||
|
||||
PG_DUMPALL_OPTIONS=$POSTGRESQL_BACKUP_PG_DUMPALL_OPTIONS
|
||||
TMP_DIR=/tmp/pg_backup
|
||||
BACKUPS_DIR=${POSTGRESQL_BACKUP_BASE_DIR}/db/${POSTGRESQL_POD_NAMESPACE}/postgres/current
|
||||
ARCHIVE_DIR=${POSTGRESQL_BACKUP_BASE_DIR}/db/${POSTGRESQL_POD_NAMESPACE}/postgres/archive
|
||||
LOG_FILE=/tmp/dberror.log
|
||||
PG_DUMPALL="pg_dumpall \
|
||||
$POSTGRESQL_BACKUP_PG_DUMPALL_OPTIONS \
|
||||
-U $POSTGRESQL_BACKUP_USER \
|
||||
-h $POSTGRESQL_SERVICE_HOST"
|
||||
source /tmp/backup_main.sh
|
||||
|
||||
source /tmp/common_backup_restore.sh
|
||||
# Export the variables required by the framework
|
||||
# Note: REMOTE_BACKUP_ENABLED and CONTAINER_NAME are already exported
|
||||
export DB_NAMESPACE=${POSTGRESQL_POD_NAMESPACE}
|
||||
export DB_NAME="postgres"
|
||||
export LOCAL_DAYS_TO_KEEP=$POSTGRESQL_LOCAL_BACKUP_DAYS_TO_KEEP
|
||||
export REMOTE_DAYS_TO_KEEP=$POSTGRESQL_REMOTE_BACKUP_DAYS_TO_KEEP
|
||||
export ARCHIVE_DIR=${POSTGRESQL_BACKUP_BASE_DIR}/db/${DB_NAMESPACE}/${DB_NAME}/archive
|
||||
|
||||
# Create necessary directories if they do not exist.
|
||||
mkdir -p $BACKUPS_DIR || log_backup_error_exit "Cannot create directory ${BACKUPS_DIR}!"
|
||||
mkdir -p $ARCHIVE_DIR || log_backup_error_exit "Cannot create directory ${ARCHIVE_DIR}!"
|
||||
mkdir -p $TMP_DIR || log_backup_error_exit "Cannot create directory ${TMP_DIR}!"
|
||||
# This function dumps all database files to the $TMP_DIR that is being
# used as a staging area for preparing the backup tarball. Log file to
# write to is passed in - the framework will expect that file to have any
# errors that occur if the database dump is unsuccessful, so that it can
# add the file contents to its own logs.
#
# Arguments: $1 - staging directory for the dump
#            $2 - log file to append pg_dumpall errors to
# Returns:   0 when the dump succeeded and is non-empty, 1 otherwise.
#
# Note: tarball creation, archiving and the remote-store hand-off are all
# handled by the backup framework (backup_main.sh), not here.
dump_databases_to_directory() {
  TMP_DIR=$1
  LOG_FILE=$2

  # Build the pg_dumpall command from the chart-supplied options and the
  # admin credentials (PGPASSWORD is exported by the surrounding script).
  PG_DUMPALL_OPTIONS=$POSTGRESQL_BACKUP_PG_DUMPALL_OPTIONS
  PG_DUMPALL="pg_dumpall \
  $PG_DUMPALL_OPTIONS \
  -U $POSTGRESQL_ADMIN_USER \
  -h $POSTGRESQL_SERVICE_HOST"

  SQL_FILE=postgres.$POSTGRESQL_POD_NAMESPACE.all

  cd $TMP_DIR || log_backup_error_exit "Cannot change to directory $TMP_DIR"

  # Start with a clean log so the framework only sees this run's errors.
  rm -f $LOG_FILE

  #Dump all databases
  $PG_DUMPALL --file=${TMP_DIR}/${SQL_FILE}.sql 2>>$LOG_FILE
  if [[ $? -eq 0 && -s "${TMP_DIR}/${SQL_FILE}.sql" ]]; then
    log INFO postgresql_backup "Databases dumped successfully."
    return 0
  else
    # The framework will pick up $LOG_FILE and add it to its own logs.
    log ERROR "Backup of the postgresql database failed and needs attention."
    return 1
  fi
}
|
||||
|
||||
#Only delete the old archive after a successful archive
if [ "$POSTGRESQL_BACKUP_DAYS_TO_KEEP" -gt 0 ]; then
  log INFO postgresql_backup "Deleting backups older than ${POSTGRESQL_BACKUP_DAYS_TO_KEEP} days"
  if [ -d $ARCHIVE_DIR ]; then
    for ARCHIVE_FILE in $(ls -1 $ARCHIVE_DIR/*.gz); do
      # Archive names look like <name>.<namespace>.all.<date>.tar.gz, so the
      # 4th dot-separated field of the basename is the timestamp.
      ARCHIVE_DATE=$( echo $ARCHIVE_FILE | awk -F/ '{print $NF}' | cut -d'.' -f 4)
      if [ "$(seconds_difference $ARCHIVE_DATE)" -gt "$(($POSTGRESQL_BACKUP_DAYS_TO_KEEP*86400))" ]; then
        log INFO postgresql_backup "Deleting file $ARCHIVE_FILE."
        rm -rf $ARCHIVE_FILE
        if [[ $? -ne 0 ]]; then
          # Fixed: this keyword was corrupted to "fhen" in the original,
          # which is a syntax error. Clear the staging area so a partial
          # state is not shipped remotely, then bail out.
          rm -rf $BACKUPS_DIR/*
          log_backup_error_exit "Cannot remove ${ARCHIVE_FILE}"
        fi
      else
        log INFO postgresql_backup "Keeping file ${ARCHIVE_FILE}."
      fi
    done
  fi
fi

# Turn off trace just for a clearer printout of backup status - for manual backups, mainly.
set +x
echo "=================================================================="
echo "Backup successful!"
echo "Backup archive name: $TARBALL_FILE"
echo "=================================================================="

# Call main program to start the database backup
backup_databases
|
||||
|
@ -12,359 +12,100 @@
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
|
||||
# Capture the user's command line arguments
ARGS=("$@")

# This is needed to get the postgresql admin password
# Note: xtracing should be off so it doesn't print the pw
export PGPASSWORD=$(cat /etc/postgresql/admin_user.conf \
  | grep postgres | awk -F: '{print $5}')

RESTORE_DIR=${POSTGRESQL_BACKUP_BASE_DIR}/db/${POSTGRESQL_POD_NAMESPACE}/postgres/restore

# Export the variables needed by the restore framework
export DB_NAME="postgres"
export DB_NAMESPACE=${POSTGRESQL_POD_NAMESPACE}
export ARCHIVE_DIR=${POSTGRESQL_BACKUP_BASE_DIR}/db/${DB_NAMESPACE}/${DB_NAME}/archive

source /tmp/restore_main.sh

# Define variables needed in this file.
# admin_user.conf is formatted host:port:db:user:password, hence cut -d: -f 1.
POSTGRESQL_HOST=$(cat /etc/postgresql/admin_user.conf | cut -d: -f 1)
LOG_FILE=/tmp/dbrestore.log
export PSQL="psql -U $POSTGRESQL_ADMIN_USER -h $POSTGRESQL_HOST"
|
||||
|
||||
# Print the restore CLI usage summary and exit via clean_and_exit with the
# supplied return code.
usage() {
  local rc=$1
  cat <<'EOF'
Usage:
Restore command options
=============================
help
list_archives [remote]
list_databases <archive_filename> [remote]
restore <archive_filename> <db_specifier> [remote]
 where <db_specifier> = <dbname> | ALL
EOF
  clean_and_exit $rc ""
}
|
||||
|
||||
#Extract Single Database SQL Dump from pg_dumpall dump file
#  $1 - pg_dumpall sql file to read
#  $2 - database name to extract; result is written to ${RESTORE_DIR}/$2.sql
extract_single_db_dump() {
  # Keep everything from the "\connect <db>" line onward, then drop the
  # "dump complete" trailer and everything after it. Expansions are quoted
  # so paths containing spaces do not word-split.
  sed "/connect.*$2/,\$!d" "$1" | \
    sed "/PostgreSQL database dump complete/,\$d" > "${RESTORE_DIR}/$2.sql"
}
|
||||
|
||||
#Exit cleanly with some message and return code
#  $1 - return code for the process
#  $2 - message to print (suppressed when empty)
clean_and_exit() {
  local rc=$1
  local msg=$2

  # Always scrub the restore staging area on the way out.
  rm -rf $RESTORE_DIR/*

  if [[ -n "${msg}" ]]; then
    echo $msg
  fi
  exit $rc
}
|
||||
|
||||
# Signal the other container that it should retrieve a list of archives
# from the RGW, then block until it responds (or times out).
retrieve_remote_listing() {
  # Drop any stale response before issuing a new request.
  rm -rf $RESTORE_DIR/archive_list_*

  # Creating this file is the request signal for the sidecar container.
  touch $RESTORE_DIR/archive_listing_request

  echo "Waiting for archive listing..."
  wait_for_file $RESTORE_DIR/archive_list_*
  if [[ $? -eq 1 ]]; then
    clean_and_exit 1 "Request failed - container did not respond. Archive listing is NOT available."
  fi

  # An archive_list_error file means the sidecar reported a failure.
  if ERR=$(cat $RESTORE_DIR/archive_list_error 2>/dev/null); then
    clean_and_exit 1 "Request failed - ${ERR}"
  fi

  echo "Done waiting. Archive list is available."
}
|
||||
|
||||
# Signal the other container that it should retrieve a single archive
# from the RGW, then block until the file shows up (or the request fails).
retrieve_remote_archive() {
  ARCHIVE=$1

  # Drop any stale response before issuing a new request.
  rm -rf $RESTORE_DIR/archive_*

  # Writing the archive name into the request file is the signal for the
  # sidecar container to start the transfer.
  echo "$ARCHIVE" > $RESTORE_DIR/get_archive_request

  echo "Waiting for requested archive ${ARCHIVE}..."
  wait_for_file $RESTORE_DIR/archive_*
  if [[ $? -eq 1 ]]; then
    clean_and_exit 1 "Request failed - container did not respond. Archive ${ARCHIVE} is NOT available."
  fi

  # An archive_error file means the sidecar reported a failure.
  if ERR=$(cat $RESTORE_DIR/archive_error 2>/dev/null); then
    clean_and_exit 1 "Request failed - ${ERR}"
  fi

  rm -rf $RESTORE_DIR/archive_response
  if [[ ! -e $RESTORE_DIR/$ARCHIVE ]]; then
    clean_and_exit 1 "Request failed - Archive $ARCHIVE is NOT available."
  fi
  echo "Done waiting. Archive $ARCHIVE is available."
}
|
||||
|
||||
#Display all archives
#  $1 - optional "remote" to list RGW archives instead of local ones.
# Exits via clean_and_exit in every branch.
list_archives() {
  REMOTE=$1

  if [[ "x${REMOTE^^}" == "xREMOTE" ]]; then
    retrieve_remote_listing
    if [[ -e $RESTORE_DIR/archive_list_response ]]; then
      echo
      echo "All Archives from RGW Data Store"
      echo "=============================================="
      cat $RESTORE_DIR/archive_list_response
      clean_and_exit 0 ""
    else
      clean_and_exit 1 "Archives could not be retrieved from the RGW."
    fi
  elif [[ "x${REMOTE}" == "x" ]]; then
    if [ -d $ARCHIVE_DIR ]; then
      archives=$(find $ARCHIVE_DIR/ -iname "*.gz" -print)
      echo
      echo "All Local Archives"
      echo "=============================================="
      for archive in $archives; do
        # Print just the file name. The previous 'cut -d/ -f 8' assumed a
        # fixed mount depth and broke whenever the base path changed.
        echo $archive | awk -F/ '{print $NF}'
      done
      clean_and_exit 0 ""
    else
      clean_and_exit 1 "Local archive directory is not available."
    fi
  else
    usage 1
  fi
}
|
||||
|
||||
#Return all databases from an archive
#  $1 - archive file name (relative to $ARCHIVE_DIR, or on the RGW)
#  $2 - optional "remote" to fetch the archive from the RGW first
# Side effects: stages and unpacks the archive in $RESTORE_DIR and sets the
# DBS global to the space/newline-separated database names found in the dump
# (a single blank when the dump file is absent).
get_databases() {
  ARCHIVE_FILE=$1
  REMOTE=$2

  if [[ "x$REMOTE" == "xremote" ]]; then
    retrieve_remote_archive $ARCHIVE_FILE
  elif [[ "x$REMOTE" == "x" ]]; then
    if [ -e $ARCHIVE_DIR/$ARCHIVE_FILE ]; then
      cp $ARCHIVE_DIR/$ARCHIVE_FILE $RESTORE_DIR/$ARCHIVE_FILE
      if [[ $? != 0 ]]; then
        clean_and_exit 1 "Could not copy local archive to restore directory."
      fi
    else
      clean_and_exit 1 "Local archive file could not be found."
    fi
  else
    usage 1
  fi

  echo "Decompressing archive $ARCHIVE_FILE..."
  cd $RESTORE_DIR
  tar zxvf - < $RESTORE_DIR/$ARCHIVE_FILE 1>/dev/null
  SQL_FILE=postgres.$POSTGRESQL_POD_NAMESPACE.all.sql
  if [ -e $RESTORE_DIR/$SQL_FILE ]; then
    # pg_dumpall emits "CREATE DATABASE <name> WITH ..."; field 3 is the name.
    DBS=$( grep 'CREATE DATABASE' $RESTORE_DIR/$SQL_FILE | awk '{ print $3 }' )
  else
    DBS=" "
  fi
}
|
||||
|
||||
#Display all databases from an archive
#  $1 - archive file name
#  $2 - optional "remote" to fetch the archive from the RGW
list_databases() {
  ARCHIVE_FILE=$1
  REMOTE=$2
  WHERE="local"

  if [[ "x${REMOTE}" != "x" ]]; then
    WHERE="remote"
  fi

  # get_databases stages the archive and sets the DBS global.
  get_databases $ARCHIVE_FILE $REMOTE
  if [ -n "$DBS" ]; then
    echo " "
    echo "Databases in the $WHERE archive $ARCHIVE_FILE"
    echo "================================================================================"
    for db in $DBS; do
      echo $db
    done
  else
    echo "There is no database in the archive."
  fi
}
|
||||
# Extract Single Database SQL Dump from pg_dumpall dump file
#  $1 - pg_dumpall sql file, $2 - database name, $3 - output directory;
#  writes the extracted dump to ${3}/${2}.sql
extract_single_db_dump() {
  local dump=$1 db=$2 dest=$3
  # First sed keeps from "\connect <db>" to EOF; second drops the trailer.
  sed "/connect.*${db}/,\$!d" $dump \
    | sed "/PostgreSQL database dump complete/,\$d" > ${dest}/${db}.sql
}
|
||||
|
||||
# Create database $1 only when it does not already exist. Postgresql has no
# "CREATE DATABASE IF NOT EXISTS", so probe pg_database first.
create_db_if_not_exist() {
  if ! $PSQL -tc "SELECT 1 FROM pg_database WHERE datname = '$1'" | grep -q 1; then
    $PSQL -c "CREATE DATABASE $1"
  fi
}
|
||||
|
||||
# Restore a single database dump from pg_dumpall sql dumpfile.
#  $1 - name of the database to restore
#  $2 - directory holding the dump (defaults to $RESTORE_DIR for callers
#       that predate the framework and pass only the database name)
# Returns: 0 on success, 1 on any failure; psql output goes to $LOG_FILE.
restore_single_db() {
  SINGLE_DB_NAME=$1
  if [ -z "$SINGLE_DB_NAME" ]; then
    usage 1
  fi
  TMP_DIR=${2:-$RESTORE_DIR}

  SQL_FILE=postgres.$POSTGRESQL_POD_NAMESPACE.all.sql
  if [[ -f $TMP_DIR/$SQL_FILE ]]; then
    extract_single_db_dump $TMP_DIR/$SQL_FILE $SINGLE_DB_NAME $TMP_DIR
    if [[ -f $TMP_DIR/$SINGLE_DB_NAME.sql && -s $TMP_DIR/$SINGLE_DB_NAME.sql ]]; then
      # Postgresql does not have the concept of creating database if condition.
      # This next command creates the database in case it does not exist.
      # (Fixed: the original passed the undefined lowercase $single_db_name.)
      $PSQL -tc "SELECT 1 FROM pg_database WHERE datname = '$SINGLE_DB_NAME'" | grep -q 1 || \
        $PSQL -c "CREATE DATABASE $SINGLE_DB_NAME"
      if [[ "$?" -ne 0 ]]; then
        echo "Could not create the single database being restored: ${SINGLE_DB_NAME}."
        return 1
      fi
      $PSQL -d $SINGLE_DB_NAME -f ${TMP_DIR}/${SINGLE_DB_NAME}.sql 2>>$LOG_FILE >> $LOG_FILE
      if [[ "$?" -eq 0 ]]; then
        echo "Database restore Successful."
      else
        echo "Database restore Failed."
        return 1
      fi
    else
      echo "Database dump For $SINGLE_DB_NAME is empty or not available."
      return 1
    fi
  else
    echo "No database file available to restore from."
    return 1
  fi
  return 0
}
|
||||
|
||||
# Restore all the databases from the pg_dumpall sql file.
#  $1 - directory holding the dump (defaults to $RESTORE_DIR for callers
#       that predate the framework and pass no arguments)
# Returns: 0 on success, 1 on failure; psql output goes to $LOG_FILE.
restore_all_dbs() {
  TMP_DIR=${1:-$RESTORE_DIR}

  SQL_FILE=postgres.$POSTGRESQL_POD_NAMESPACE.all.sql
  if [[ -f $TMP_DIR/$SQL_FILE ]]; then
    # Feed the full dump to psql; it recreates every database and role.
    $PSQL postgres -f $TMP_DIR/$SQL_FILE 2>>$LOG_FILE >> $LOG_FILE
    if [[ "$?" -eq 0 ]]; then
      echo "Database Restore successful."
    else
      echo "Database Restore failed."
      return 1
    fi
  else
    echo "There is no database file available to restore from."
    return 1
  fi
  return 0
}
|
||||
|
||||
|
||||
# Echo 1 if $2 occurs as a whole word in the whitespace-separated list $1,
# otherwise echo 0.
is_Option() {
  local haystack=$1
  local needle=$2
  local hit=0
  local word
  for word in $haystack; do
    if [ "$word" == "$needle" ]; then
      hit=1
    fi
  done
  echo $hit
}
|
||||
|
||||
#Main
#Create Restore Directory if it's not created already
mkdir -p $RESTORE_DIR

#Cleanup Restore Directory
rm -rf $RESTORE_DIR/*

# Dispatch on the number of CLI arguments, then on the command word.
if [ ${#ARGS[@]} -gt 4 ]; then
  usage 1
elif [ ${#ARGS[@]} -eq 1 ]; then
  if [ "${ARGS[0]}" == "list_archives" ]; then
    list_archives
    clean_and_exit 0 ""
  elif [ "${ARGS[0]}" == "help" ]; then
    usage 0
  else
    usage 1
  fi
elif [ ${#ARGS[@]} -eq 2 ]; then
  if [ "${ARGS[0]}" == "list_databases" ]; then
    list_databases ${ARGS[1]}
    clean_and_exit 0 ""
  elif [ "${ARGS[0]}" == "list_archives" ]; then
    list_archives ${ARGS[1]}
    clean_and_exit 0 ""
  else
    usage 1
  fi
elif [[ ${#ARGS[@]} -eq 3 ]] || [[ ${#ARGS[@]} -eq 4 ]]; then
  if [ "${ARGS[0]}" == "list_databases" ]; then
    list_databases ${ARGS[1]} ${ARGS[2]}
    clean_and_exit 0 ""
  elif [ "${ARGS[0]}" != "restore" ]; then
    usage 1
  else
    ARCHIVE=${ARGS[1]}
    DB_SPEC=${ARGS[2]}
    REMOTE=""
    if [ ${#ARGS[@]} -eq 4 ]; then
      REMOTE=${ARGS[3]}
    fi

    #Get all the databases in that archive
    get_databases $ARCHIVE $REMOTE

    #check if the requested database is available in the archive
    if [ $(is_Option "$DBS" $DB_SPEC) -eq 1 ]; then
      echo "Restoring Database $DB_SPEC And Grants"
      restore_single_db $DB_SPEC
      echo "Tail ${LOG_FILE} for restore log."
      clean_and_exit 0 ""
    elif [ "$( echo $DB_SPEC | tr '[a-z]' '[A-Z]')" == "ALL" ]; then
      echo "Restoring All The Databases. This could take a few minutes..."
      restore_all_dbs
      clean_and_exit 0 "Tail ${LOG_FILE} for restore log."
    else
      clean_and_exit 1 "There is no database with that name"
    fi
  fi
else
  usage 1
fi

clean_and_exit 0 "Done"
|
||||
# Call the CLI interpreter, providing the archive directory path and the
# user arguments passed in. Quote the array expansion so arguments that
# contain whitespace are passed through as single words.
cli_main "${ARGS[@]}"
|
||||
|
@ -32,9 +32,8 @@ data:
|
||||
{{- if .Values.conf.backup.enabled }}
|
||||
backup_postgresql.sh: {{ tuple "bin/_backup_postgresql.sh.tpl" . | include "helm-toolkit.utils.template" | b64enc }}
|
||||
restore_postgresql.sh: {{ tuple "bin/_restore_postgresql.sh.tpl" . | include "helm-toolkit.utils.template" | b64enc }}
|
||||
remote_store_postgresql.sh: {{ tuple "bin/_remote_store_postgresql.sh.tpl" . | include "helm-toolkit.utils.template" | b64enc }}
|
||||
remote_retrieve_postgresql.sh: {{ tuple "bin/_remote_retrieve_postgresql.sh.tpl" . | include "helm-toolkit.utils.template" | b64enc }}
|
||||
common_backup_restore.sh: {{ tuple "bin/_common_backup_restore.sh.tpl" . | include "helm-toolkit.utils.template" | b64enc }}
|
||||
backup_main.sh: {{ include "helm-toolkit.scripts.db-backup-restore.backup_main" . | b64enc }}
|
||||
restore_main.sh: {{ include "helm-toolkit.scripts.db-backup-restore.restore_main" . | b64enc }}
|
||||
{{- end }}
|
||||
{{- if .Values.manifests.job_ks_user }}
|
||||
ks-user.sh: {{ include "helm-toolkit.scripts.keystone_user" . | b64enc }}
|
||||
|
@ -48,6 +48,7 @@ spec:
|
||||
{{ tuple $envAll "postgresql-backup" "backup" | include "helm-toolkit.snippets.kubernetes_metadata_labels" | indent 12 }}
|
||||
spec:
|
||||
securityContext:
|
||||
runAsUser: 65534
|
||||
fsGroup: 999
|
||||
serviceAccountName: {{ $serviceAccountName }}
|
||||
restartPolicy: OnFailure
|
||||
@ -55,17 +56,17 @@ spec:
|
||||
{{ .Values.labels.job.node_selector_key }}: {{ .Values.labels.job.node_selector_value }}
|
||||
containers:
|
||||
- name: postgresql-backup
|
||||
{{ tuple $envAll "postgresql" | include "helm-toolkit.snippets.image" | indent 14 }}
|
||||
{{ tuple $envAll "postgresql_backup" | include "helm-toolkit.snippets.image" | indent 14 }}
|
||||
{{ tuple $envAll $envAll.Values.pod.resources.jobs.postgresql_backup | include "helm-toolkit.snippets.kubernetes_resources" | indent 14 }}
|
||||
command:
|
||||
- /tmp/backup_postgresql.sh
|
||||
env:
|
||||
- name: POSTGRESQL_BACKUP_PASSWORD
|
||||
- name: POSTGRESQL_ADMIN_PASSWORD
|
||||
valueFrom:
|
||||
secretKeyRef:
|
||||
key: POSTGRES_PASSWORD
|
||||
name: postgresql-admin
|
||||
- name: POSTGRESQL_BACKUP_USER
|
||||
- name: POSTGRESQL_ADMIN_USER
|
||||
valueFrom:
|
||||
secretKeyRef:
|
||||
key: POSTGRES_USER
|
||||
@ -74,56 +75,42 @@ spec:
|
||||
value: {{ .Values.conf.backup.base_path }}
|
||||
- name: POSTGRESQL_BACKUP_PG_DUMPALL_OPTIONS
|
||||
value: {{ .Values.conf.backup.pg_dumpall_options }}
|
||||
- name: POSTGRESQL_BACKUP_DAYS_TO_KEEP
|
||||
value: "{{ .Values.conf.backup.days_of_backup_to_keep }}"
|
||||
- name: POSTGRESQL_LOCAL_BACKUP_DAYS_TO_KEEP
|
||||
value: "{{ .Values.conf.backup.days_to_keep }}"
|
||||
- name: POSTGRESQL_POD_NAMESPACE
|
||||
valueFrom:
|
||||
fieldRef:
|
||||
fieldPath: metadata.namespace
|
||||
- name: REMOTE_BACKUP_ENABLED
|
||||
value: "{{ .Values.conf.backup.remote_backup.enabled }}"
|
||||
{{- if .Values.conf.backup.remote_backup.enabled }}
|
||||
- name: POSTGRESQL_REMOTE_BACKUP_DAYS_TO_KEEP
|
||||
value: "{{ .Values.conf.backup.remote_backup.days_to_keep }}"
|
||||
- name: CONTAINER_NAME
|
||||
value: "{{ .Values.conf.backup.remote_backup.container_name }}"
|
||||
- name: STORAGE_POLICY
|
||||
value: "{{ .Values.conf.backup.remote_backup.storage_policy }}"
|
||||
{{- with $env := dict "ksUserSecret" $envAll.Values.secrets.identity.postgresql }}
|
||||
{{- include "helm-toolkit.snippets.keystone_openrc_env_vars" $env | indent 16 }}
|
||||
{{- end }}
|
||||
{{- end }}
|
||||
volumeMounts:
|
||||
- name: pod-tmp
|
||||
mountPath: /tmp
|
||||
- mountPath: /tmp/common_backup_restore.sh
|
||||
name: postgresql-bin
|
||||
readOnly: true
|
||||
subPath: common_backup_restore.sh
|
||||
- mountPath: /tmp/backup_postgresql.sh
|
||||
name: postgresql-bin
|
||||
readOnly: true
|
||||
subPath: backup_postgresql.sh
|
||||
- mountPath: /tmp/backup_main.sh
|
||||
name: postgresql-bin
|
||||
readOnly: true
|
||||
subPath: backup_main.sh
|
||||
- mountPath: {{ .Values.conf.backup.base_path }}
|
||||
name: postgresql-backup-dir
|
||||
- name: postgresql-secrets
|
||||
mountPath: /etc/postgresql/admin_user.conf
|
||||
subPath: admin_user.conf
|
||||
readOnly: true
|
||||
- name: postgresql-remote-store
|
||||
{{ tuple $envAll "postgresql_remote_store" | include "helm-toolkit.snippets.image" | indent 14 }}
|
||||
command:
|
||||
- /tmp/remote_store_postgresql.sh
|
||||
env:
|
||||
{{- with $env := dict "ksUserSecret" $envAll.Values.secrets.identity.postgresql }}
|
||||
{{- include "helm-toolkit.snippets.keystone_openrc_env_vars" $env | indent 16 }}
|
||||
{{- end }}
|
||||
- name: POSTGRESQL_BACKUP_BASE_DIR
|
||||
value: {{ .Values.conf.backup.base_path }}
|
||||
- name: POSTGRESQL_BACKUP_DAYS_TO_KEEP
|
||||
value: "{{ .Values.conf.backup.days_of_backup_to_keep }}"
|
||||
- name: POSTGRESQL_POD_NAMESPACE
|
||||
valueFrom:
|
||||
fieldRef:
|
||||
fieldPath: metadata.namespace
|
||||
volumeMounts:
|
||||
- mountPath: /tmp/common_backup_restore.sh
|
||||
name: postgresql-bin
|
||||
readOnly: true
|
||||
subPath: common_backup_restore.sh
|
||||
- mountPath: /tmp/remote_store_postgresql.sh
|
||||
name: postgresql-bin
|
||||
readOnly: true
|
||||
subPath: remote_store_postgresql.sh
|
||||
- mountPath: {{ .Values.conf.backup.base_path }}
|
||||
name: postgresql-backup-dir
|
||||
restartPolicy: OnFailure
|
||||
serviceAccount: {{ $serviceAccountName }}
|
||||
serviceAccountName: {{ $serviceAccountName }}
|
||||
|
@ -20,7 +20,7 @@ metadata:
|
||||
name: {{ $secretName }}
|
||||
type: Opaque
|
||||
data:
|
||||
{{- $identityClass := .Values.endpoints.identity.auth.postgresql }}
|
||||
{{- $identityClass := index .Values.endpoints.identity.auth $userClass }}
|
||||
{{- if $identityClass.auth_url }}
|
||||
OS_AUTH_URL: {{ $identityClass.auth_url | b64enc }}
|
||||
{{- else }}
|
||||
@ -47,7 +47,7 @@ type: Opaque
|
||||
data:
|
||||
{{- $identityClass := index .Values.endpoints.identity.auth $userClass }}
|
||||
{{- if $identityClass.auth_url }}
|
||||
OS_AUTH_URL: {{ $identityClass.auth_url }}
|
||||
OS_AUTH_URL: {{ $identityClass.auth_url | b64enc }}
|
||||
{{- else }}
|
||||
OS_AUTH_URL: {{ tuple "identity" "internal" "api" $envAll | include "helm-toolkit.endpoints.keystone_endpoint_uri_lookup" | b64enc }}
|
||||
{{- end }}
|
||||
|
@ -132,7 +132,7 @@ images:
|
||||
ks_user: docker.io/openstackhelm/heat:stein-ubuntu_bionic
|
||||
prometheus_postgresql_exporter: docker.io/wrouesnel/postgres_exporter:v0.4.6
|
||||
prometheus_postgresql_exporter_create_user: "docker.io/postgres:9.5"
|
||||
postgresql_remote_store: docker.io/openstackhelm/heat:stein-ubuntu_bionic
|
||||
postgresql_backup: "quay.io/airshipit/porthole-postgresql-utility:latest-ubuntu_bionic"
|
||||
pull_policy: "IfNotPresent"
|
||||
local_registry:
|
||||
active: false
|
||||
@ -367,11 +367,14 @@ conf:
|
||||
backup:
|
||||
enabled: false
|
||||
base_path: /var/backup
|
||||
days_of_backup_to_keep: 3
|
||||
days_to_keep: 3
|
||||
pg_dumpall_options: null
|
||||
remote_backup:
|
||||
enabled: false
|
||||
container_name: postgresql
|
||||
days_to_keep: 14
|
||||
storage_policy: default-placement
|
||||
|
||||
exporter:
|
||||
queries:
|
||||
pg_replication:
|
||||
|
Loading…
Reference in New Issue
Block a user