Commit 88e2ee5c authored by Michał 'rysiek' Woźniak's avatar Michał 'rysiek' Woźniak
Browse files

initial commit

parents
This diff is collapsed.
# General purpose utilities, mainly in Bash
These utilities might be useful for things ranging from database dumps/backups to making it possible to set certain programs that require arguments (`rsync`, `rsnapshot`) as the command for a key in SSH `authorized_keys`.
\ No newline at end of file
#!/bin/bash
#
# rsnapshot wrapper script
#
# runs cron-operated rsnapshot from an SSH forced command;
# the config is taken from the RSNAPSHOT_CONFIG variable when set,
# falling back to the default /etc/rsnapshot.conf otherwise
#
# the action to perform arrives via SSH_ORIGINAL_COMMAND
# usage example:
# ssh user@your-rsnapshot-host.example.com sync

# only a fixed whitelist of rsnapshot actions is accepted
case "$SSH_ORIGINAL_COMMAND" in
    sync|hourly|daily|weekly|monthly)
        # valid -- fall through to running rsnapshot below
        ;;
    *)
        echo "ERROR: '$SSH_ORIGINAL_COMMAND' is not a valid rsnapshot command."
        exit 1
        ;;
esac

# config file -- when unset, fall back to the default
# this means you can use "RSNAPSHOT_CONFIG=/some/path.conf /path/to/ssh-rsnapshot-wrapper.sh"
# as the authorized_keys key command
# TODO: why not use the regular args available $@ (and command would be: "/path/to/rsnapshot -vc /some/path.conf")?
RSNAPSHOT_CONFIG="${RSNAPSHOT_CONFIG-/etc/rsnapshot.conf}"

# hand over to rsnapshot, replacing this shell
exec /usr/bin/rsnapshot -vc "$RSNAPSHOT_CONFIG" "$SSH_ORIGINAL_COMMAND"
\ No newline at end of file
#!/bin/bash
#
# rsync backup script
# server-side
#
# to be used in authorized_keys as the explicit command allowed for a given key
#
#
# NOTICE:
# this script is being used as a wrapper for rsync in an authorized_keys command
# it *cannot* output *anything* to stdout, or rsync will complain and fail
#
# NOTICE:
# this *relies* on bash!
# especially the SSH_RSYNC_ALLOWED_SRC variable relies on the whole authorized_keys key command
# (not only this script!) being run in bash; remember to set the shell for the user appropriately!

# DEBUG
# you can enable debug by prepending this to the authorized_keys key command:
# "SSH_RSYNC_DEBUG=/path/to/debug.log "
[ -z "${SSH_RSYNC_DEBUG+x}" ] || echo >> "$SSH_RSYNC_DEBUG"
[ -z "${SSH_RSYNC_DEBUG+x}" ] || echo >> "$SSH_RSYNC_DEBUG"
[ -z "${SSH_RSYNC_DEBUG+x}" ] || date >> "$SSH_RSYNC_DEBUG"
[ -z "${SSH_RSYNC_DEBUG+x}" ] || echo "SSH_ORIGINAL_COMMAND: $SSH_ORIGINAL_COMMAND" >> "$SSH_RSYNC_DEBUG"

# let's check all the command elements, one by one
# (each check exits with a distinct code, for easier client-side debugging)
[[ "$( echo "$SSH_ORIGINAL_COMMAND" | cut -d ' ' -f 1 )" == 'rsync' ]] || exit 2
[[ "$( echo "$SSH_ORIGINAL_COMMAND" | cut -d ' ' -f 2 )" == '--server' ]] || exit 3
[[ "$( echo "$SSH_ORIGINAL_COMMAND" | cut -d ' ' -f 3 )" == '--sender' ]] || exit 4
# FIXME TODO this can be in any order
[[ "$( echo "$SSH_ORIGINAL_COMMAND" | cut -d ' ' -f 4 )" == '-vlogDtpAXrze.iLsfx' ]] || exit 5
# this is just a dot, yes
[[ "$( echo "$SSH_ORIGINAL_COMMAND" | cut -d ' ' -f 5 )" == '.' ]] || exit 6
# we need the source path for later
# NOTICE: strong assumption here: no spaces in source paths!
# TODO : support paths containing spaces, too!
SRC="$( echo "$SSH_ORIGINAL_COMMAND" | cut -d ' ' -f 6 )"
[ -z "${SSH_RSYNC_DEBUG+x}" ] || echo "+-- source is: $SRC" >> "$SSH_RSYNC_DEBUG"
# also, make sure there are no additional elements of the command
[[ "$( echo "$SSH_ORIGINAL_COMMAND" | cut -d ' ' -f 7 )" == '' ]] || exit 7
# DEBUG
[ -z "${SSH_RSYNC_DEBUG+x}" ] || echo "+-- command seems okay, proceeding" >> "$SSH_RSYNC_DEBUG"

# if there are sources we want to limit rsync to, in SSH_RSYNC_ALLOWED_SRC,
# verify that the src matches; anything else -> error and exit
#
# you can enable source path checking by prepending this to the authorized_keys key command:
# "SSH_RSYNC_ALLOWED_SRC=('/some/path/1' '/some/path/2' '/a path/with/spaces') "
# (yes, without commas!)
# do we have a list of allowed sources?
if [ ! -z "${SSH_RSYNC_ALLOWED_SRC+x}" ]; then
    SRC_ALLOWED=0
    # we have to go through all the allowed src paths
    for ALLOWED_SRC in "${SSH_RSYNC_ALLOWED_SRC[@]}"; do
        # stripping the ending '/' (if there) from both for comparison
        if [ "${ALLOWED_SRC%/}" == "${SRC%/}" ]; then
            # mark as allowed and we're done
            SRC_ALLOWED=1
            break
        fi
    done
    # so, do we have an allowed source?
    # BUGFIX: the original tested `$SRC_ALLOWED == 1` here, which *rejected*
    # whitelisted sources and let non-whitelisted ones straight through to
    # rsync; the error branch must run when the source was NOT matched
    if [ "$SRC_ALLOWED" -ne 1 ]; then
        # this shouldn't happen...
        # ...unless somebody's doing something nasty
        # BUGFIX: the debug guards here and below used `&&` (inverted vs. the
        # `||` guards above), which tried to append to an empty path whenever
        # debugging was *disabled*
        [ -z "${SSH_RSYNC_DEBUG+x}" ] || echo "+-- source not in \$SSH_RSYNC_ALLOWED_SRC! exiting." >> "$SSH_RSYNC_DEBUG"
        exit 8
    fi
fi

# run the rsync command
[ -z "${SSH_RSYNC_DEBUG+x}" ] || echo "+-- rsync..." >> "$SSH_RSYNC_DEBUG"
# yes, the output has to go to stdout, that's how rsync communicates through SSH
rsync --server --sender -vlogDtpAXrze.iLsfx . "$SRC"
[ -z "${SSH_RSYNC_DEBUG+x}" ] || echo "+-- we're done." >> "$SSH_RSYNC_DEBUG"
\ No newline at end of file
#!/bin/bash
#
# database utility scripts
#
#
# "we've been properly sourced" flag function
function sc_properly_sourced() {
    # the mere existence of this function (after sourcing this file) is the
    # flag itself; it only ever reports success
    return 0
}
export -f sc_properly_sourced
#
# display a general error message
# $1 -- additional error msg to display
# display a general error message banner on stdout
# $1 -- optional additional error message printed after the banner
function display_error {
    echo
    echo "########################################"
    echo "### AN ERROR OCCURED, ABORTING! ###"
    echo "### --------------------------- ###"
    echo "### inspect the log output carefully ###"
    echo "########################################"
    echo
    # print the optional extra message, but only when one was actually passed
    if [ ! -z "${1+x}" ]; then
        echo "$1"
        echo
    fi
}
export -f display_error
#
# rudimentary error handling -- exit the script
# rudimentary error handling -- show the banner, then exit the script
# $1 -- optional message forwarded to display_error
function display_error_and_quit {
    # ${1+"$1"} forwards the argument only when one was actually given
    display_error ${1+"$1"}
    # wait for operator acknowledgement before quitting
    read
    exit 1
}
export -f display_error_and_quit
#
# rudimentary error handling -- continue with next iteration of a loop
# rudimentary error handling -- show the banner, then continue the caller's loop
# $1 -- optional message forwarded to display_error
function display_error_and_continue {
    # ${1+"$1"} forwards the argument only when one was actually given
    display_error ${1+"$1"}
    # wait for operator acknowledgement
    read
    # NOTE(review): whether `continue` escapes a function into the caller's
    # loop is shell-version dependent -- modern bash does not propagate it;
    # confirm on the target shell
    continue
}
export -f display_error_and_continue
#
# rudimentary error handling -- break the loop
# rudimentary error handling -- show the banner, then break the caller's loop
# $1 -- optional message forwarded to display_error
function display_error_and_break {
    # ${1+"$1"} forwards the argument only when one was actually given
    display_error ${1+"$1"}
    # wait for operator acknowledgement
    read
    # NOTE(review): whether `break` escapes a function into the caller's
    # loop is shell-version dependent -- modern bash does not propagate it;
    # confirm on the target shell
    break
}
export -f display_error_and_break
#
# utility -- clean the variables used in the dump_*_scripts
function clean_db_vars {
    # drop all state a previous dump_*/create_* run may have left in this shell
    # NOTE(review): unlike the other helpers, this one is not `export -f`-ed
    unset DATABASES DBUSERS DBGRANTS
}
#
# create a read-only user for a mysql database (or databases)
# $1 - database host to connect to
# $2 - user to connect as
# $3 - password for that user
# $4 - username to be created
# $5 - optional; hostname of the user to be created
# (if unset or an empty string, '%' will be used)
# $6 - optional; password for the user to be created
# (if unset or an empty string, random password will be generated and printed on-screen)
#
# the user gets SELECT, SHOW DATABASES, LOCK TABLES, EXECUTE, SHOW VIEW privileges on *all databases*
function create_readonly_mysql_user() {
echo
echo "creating a read-only user..."
echo "+-- connection details:"
echo " +-- db host to connect to: $1"
echo " +-- db user to connect as: $2"
echo "+-- read-only user details:"
# the read-only user name -- simple enough
RUSER_NAME="$4"
echo " +-- username: $RUSER_NAME"
# read-only user hostname
RUSER_HOST="%"
# NOTE(review): the header above promises '%' also for an *empty* $5, but this
# only checks unset-ness -- an empty-string $5 yields an empty hostname
if [ ! -z ${5+x} ]; then
RUSER_HOST="$5"
fi
echo " +-- hostname: $RUSER_HOST"
# read-only user password
# if supplied, use it
# NOTE(review): same set-vs-empty caveat as for $5 above
if [ ! -z ${6+x} ]; then
RUSER_PW="$6"
echo " +-- password: (provided on the command line)"
# otherwise, create a random one
else
# random 24-character password; requires pwgen to be installed and in $PATH
RUSER_PW="$( pwgen -s 24 1 )" || display_error_and_quit "Error generating password; is pwgen installed and in \$PATH?"
echo " +-- password: $RUSER_PW"
fi
# do the magic
# NOTE(review): user/host/password are interpolated directly into the SQL --
# a single quote in any of them breaks (or injects into) the statement;
# trusted invocation is assumed
# NOTE(review): `GRANT ... IDENTIFIED BY` was removed in MySQL 8.0 -- that
# server version needs separate CREATE USER + GRANT; confirm the target
mysql -h "$1" -u "$2" --password="$3" --batch -e "GRANT SELECT, SHOW DATABASES, LOCK TABLES, EXECUTE, SHOW VIEW ON *.* TO '$RUSER_NAME'@'$RUSER_HOST' IDENTIFIED BY '$RUSER_PW';" \
|| display_error_and_quit "Error creating a read-only user."
}
export -f create_readonly_mysql_user
#
# this assumes we have *at least* read-only access to databases we want to dump
# $1 - targed directory; the directory and structure within ($1/, $1/schemas/, $1/dumps/) will be made sure to exist
# $2 - database host to connect to
# $3 - user to connect as
# $4 - password for that user
# $5 and further - optional, the databases to dump (dumping all if empty)
#
# relevant: https://dev.mysql.com/doc/refman/5.6/en/mysqldump.html
#
# databases are dumped as schema (containing grants related to the database and CREATE statements) and data dump spearately,
# put in $1/schemas/$database_name.schema.sql and $1/dumps/$database_name.dump.sql respectively
#
# this does *not* dump internal MySQL information_schema and performance_schema databases.
function dump_mysql_dbs {
# clean the vars (DATABASES/DBUSERS/DBGRANTS left over from a previous run)
clean_db_vars
# create the directory structure
mkdir -p "$1"
mkdir -p "$1/schemas"
mkdir -p "$1/dumps"
#
# - if we have no additional arguments (5th and onwards), we need to dump all the databases (well, apart from information_schema and performance_schema)
# - if we have such arguments, we can use WHERE `Database` IN ('$5', ...)
DATABASES_WHERE=""
if [ ! -z ${5+x} ]; then
# NOTE(review): the IN (...) list is built by turning spaces into "', '" --
# database names containing spaces or quotes would break the query;
# trusted input is assumed
DATABASES_WHERE="WHERE \`Database\` IN ( '` echo "${@:5}" | sed -r -e "s/ /', '/g" ` )"
fi
# get the list of databases
# NOTE(review): --password=... on the command line is visible in `ps` output
DATABASES="$( mysql -h "$2" -u "$3" --password="$4" --batch -e "SHOW DATABASES $DATABASES_WHERE;" | egrep -v '(Database|information_schema|performance_schema)' )"
# either the code is 0, or the command failed; act accordingly.
# NOTE(review): without `set -o pipefail` this $? is egrep's (the last pipe
# stage), not mysql's -- a failed mysql call with no output looks like
# "no databases found" instead of an error
[ $? -eq 0 ] || display_error_and_quit
# did we find anything?
if [[ "$DATABASES" == "" ]]; then
echo " +-- no databases found."
return 0
fi
#
# if we had a list of databases, did we find all of them?
# rationale: if we have a specific list of databases, we *expect* them to be dumped
# if we cannot find some of them, that should be clearly signalled
if [ ! -z ${5+x} ]; then
# `uniq -u` keeps only lines occurring once: a requested database missing from
# $DATABASES survives (appears once), one found on the server appears twice
# and is dropped -- works because $DATABASES was already filtered to ${@:5}
DATABASES_MISSING="$( echo "${@:5} $DATABASES" | tr ' ' '\n' | sort | uniq -u )"
if [[ "$DATABASES_MISSING" != "" ]]; then
# if not, complain and exit
display_error_and_quit "Not all databases found; missing: $DATABASES_MISSING"
fi
fi
# whoo hoo, found *something*
echo " +-- found `echo "$DATABASES" | wc -l` databases..."
# is the db using authentication_string, or password for user passwords?
# (checks whether mysql.user still has a `password` column -- presumably to
# tell newer server versions from older ones; only the column check itself
# is established here)
if [[ "$( mysql -h "$2" -u "$3" --password="$4" --batch --skip-column-names -e "SELECT COUNT(*) FROM information_schema.COLUMNS WHERE TABLE_SCHEMA = 'mysql' AND TABLE_NAME = 'user' AND COLUMN_NAME = 'password';" )" == "0" ]]; then
PWCOL="authentication_string"
else
PWCOL="password"
fi
echo " +-- password column: $PWCOL"
# get the list of users
# each output line has the form 'user'@'host':<password-hash>
DBUSERS="$( mysql -h "$2" -u "$3" --password="$4" --batch --skip-column-names -e "SELECT CONCAT('',QUOTE(user),'@',QUOTE(host),':',$PWCOL) FROM mysql.user WHERE user<>'';")"
[ $? -eq 0 ] || display_error_and_quit
echo " +-- found `echo "$DBUSERS" | wc -l` users..."
# get all grants for said users
DBGRANTS=""
for user_host_pass in ${DBUSERS}; do
# ignore root
if echo "$user_host_pass" | grep "'root'@" >/dev/null; then
continue
fi
DBUSER="$( echo "$user_host_pass" | cut -d ':' -f 1 )"
DBUSERPW="$( echo "$user_host_pass" | cut -d ':' -f 2 )"
# get the grants # add IDENTIFIED BY <password>, but only when there was no IDENTIFIED BY there already
# NOTE(review): the sed actually *replaces* any existing IDENTIFIED BY clause
# (and whatever follows it on that line) with the stored hash, contrary to
# the comment above -- confirm this is intended
DBGRANTS=`echo -ne "$DBGRANTS\n$( mysql -h "$2" -u "$3" --password="$4" --batch --skip-column-names -e "SHOW GRANTS FOR $DBUSER;" | sed -r -e "s/(IDENTIFIED BY.*)$/ IDENTIFIED BY PASSWORD '$DBUSERPW'/g" )"`
[ $? -eq 0 ] || display_error_and_quit
done
# iterate through databases
for db in ${DATABASES}; do
echo " +-- working on db: $db"
# files
SCHEMA_FILE="$1/schemas/$db.schema.sql"
DUMP_FILE="$1/dumps/$db.dump.sql"
echo " +-- schema file : $SCHEMA_FILE"
echo " +-- dump file : $DUMP_FILE"
# schema
echo " +-- schema..."
mysqldump -h "$2" -u "$3" --password="$4" --add-drop-database --no-data --databases "$db" > "$SCHEMA_FILE"
[ $? -eq 0 ] || display_error_and_quit
# grants
echo " +-- grants..."
echo '-- ------ --' >> "$SCHEMA_FILE"
echo '-- GRANTS --' >> "$SCHEMA_FILE"
echo '-- ------ --' >> "$SCHEMA_FILE"
# TODO FIXME lacking password information...
# SET PASSWORD FOR ... = HASH_STRING
# global grants (ON *.*) first, then grants specific to this database
# NOTE(review): the two $? checks below test sed (the last pipe stage), not
# grep, although the trailing comments describe grep's exit codes
echo "${DBGRANTS}" | grep "ON \*." | sed -r -e 's/$/;/' >> "$SCHEMA_FILE"
[ $? -lt 2 ] || display_error_and_quit # less than 2 here, because 0 on "found", 1 on "not found", 2 and above on "error"
echo "${DBGRANTS}" | grep "ON \`${db}\`" | sed -r -e 's/$/;/' >> "$SCHEMA_FILE"
[ $? -lt 2 ] || display_error_and_quit # less than 2 here, because 0 on "found", 1 on "not found", 2 and above on "error"
# we need this for the privs to work after restore
echo "FLUSH PRIVILEGES;" >> "$SCHEMA_FILE"
# dump
echo " +-- dump..."
# NOTE(review): `>>` appends -- repeated runs into the same target directory
# keep growing this file (the schema file above is truncated with `>`)
mysqldump -h "$2" -u "$3" --password="$4" --no-create-info --no-create-db --databases "$db" >> "$DUMP_FILE"
[ $? -eq 0 ] || display_error_and_quit
echo " +-- done."
done
# set the permissions so that
# this will not stop a root user from using these, but should make them stand out well enough
# (the mysql.* dumps contain the user password hashes collected above)
echo " +-- setting permissions on mysql.*"
chmod a= "$1"/*/mysql.*.sql
# we're done
echo " +-- done."
}
export -f dump_mysql_dbs
#
# create a read-only user for a postgresql database (or databases)
# $1 - database host to connect to
# $2 - user to connect as
# $3 - password for that user
# $4 - username to be created
# $5 - optional; hostname of the user to be created
# (if unset or an empty string, '%' will be used)
# $6 - optional; password for the user to be created
# (if unset or an empty string, random password will be generated and printed on-screen)
#
# not as simple as the mysql version; we need to make sure that:
# a). given role is created...
# b). ...and granted CONNECT on all databases...
# c). ...and granted SELECT on pg_authid...
# d). ...and granted USAGE on all schemas in all databases...
# e). ...and granted SELECT on all tables and sequences in all schemas in all databases...
# f). ...and granted EXECUTE on all functions in all schemas in all databases...
# g). ...and that the default privileges for all roles in all schemas in all databases grant
# the new read-only role SELECT on tables and sequences, and EXECUTE on functions
# created in the future.
#
# ref. https://wiki.postgresql.org/images/d/d1/Managing_rights_in_postgresql.pdf
#
# granting the readonly user access to all tables in a database:
# CREATE ROLE readonly NOSUPERUSER NOCREATEDB NOCREATEROLE LOGIN NOREPLICATION PASSWORD '$DUMP_PASS';
# GRANT SELECT ON pg_authid TO readonly;
# and for each database:
# GRANT CONNECT ON DATABASE a_database TO readonly;
# \c a_database
# GRANT USAGE ON SCHEMA a_schema TO readonly;
# GRANT SELECT ON ALL TABLES IN SCHEMA a_schema TO readonly;
# GRANT SELECT ON ALL SEQUENCES IN SCHEMA a_schema TO readonly;
# ALTER DEFAULT PRIVILEGES IN SCHEMA a_schema GRANT SELECT ON TABLES TO readonly;
# ALTER DEFAULT PRIVILEGES IN SCHEMA a_schema GRANT SELECT ON SEQUENCES TO readonly;
# a_schema is usually "public"
#
# CREATE ROLE readonly LOGIN PASSWORD 'some_pass';
# -- Existing objects
# GRANT CONNECT ON DATABASE the_db TO readonly;
# GRANT USAGE ON SCHEMA public TO readonly;
# GRANT SELECT ON ALL TABLES IN SCHEMA public TO readonly;
# GRANT SELECT ON ALL SEQUENCES IN SCHEMA public TO readonly;
# GRANT EXECUTE ON ALL FUNCTIONS IN SCHEMA public TO readonly;
# -- New objects
# ALTER DEFAULT PRIVILEGES FOR ROLE ddl_user IN SCHEMA public GRANT SELECT ON TABLES TO readonly;
# ALTER DEFAULT PRIVILEGES FOR ROLE ddl_user IN SCHEMA public GRANT SELECT ON SEQUENCES TO readonly;
# ALTER DEFAULT PRIVILEGES FOR ROLE ddl_user IN SCHEMA public GRANT EXECUTE ON FUNCTIONS TO readonly;
# https://wiki.postgresql.org/images/d/d1/Managing_rights_in_postgresql.pdf
function create_readonly_postgres_user() {
    # NOTE(review): this function is an unfinished stub -- it only fetches the
    # list of databases; none of the GRANT steps (a-g) documented above are
    # implemented yet
    #
    # BUGFIX: the original read host/user/password from $2/$3/$4 (a copy-paste
    # from dump_postgres_dbs, where $1 is the target directory); per the
    # documented interface above they are $1=host, $2=user, $3=password
    DATABASES="$( PGPASSWORD="$3" psql -h "$1" -U "$2" -lAqt | grep '|' | cut -d '|' -f 1 | egrep -v "template[0-9]" )"
}
export -f create_readonly_postgres_user
#
# this assumes we have *at least* read-only access to databases we want to dump
# $1 - targed directory; the directory and structure within ($1/, $1/schemas/, $1/dumps/) will be made sure to exist
# $2 - database host
# $3 - user to connect as
# $4 - password for that user
# $5 and further - optional, the databases to dump (dumping all if empty)
#
# relevant: http://www.postgresql.org/docs/current/static/app-pgdump.html
#
# databases are dumped as schema (containing grants related to the database and CREATE statements) and data dump spearately,
# put in $1/schemas/$database_name.schema.sql and $1/dumps/$database_name.dump.sql respectively
function dump_postgres_dbs {
    # clean the vars left over from a previous dump_* run
    clean_db_vars
    # create the directory structure
    mkdir -p "$1"
    mkdir -p "$1/schemas"
    mkdir -p "$1/dumps"
    # get the list of databases
    # -l/--list List all available databases, then exit. Other non-connection
    # -A/--no-align Switches to unaligned output mode.
    # -q/--quiet Specifies that psql should do its work quietly.
    # -t/--tuples-only Turn off printing of column names and result row count footers, etc.
    # ref. http://linuxcommand.org/man_pages/psql1.html
    DATABASES="$( PGPASSWORD="$4" psql -h "$2" -U "$3" -lAqt | grep '|' | cut -d '|' -f 1 | egrep -v "template[0-9]" )"
    # either the code is 0, or the command failed; act accordingly.
    # NOTE(review): without `set -o pipefail` this $? reflects the last pipe
    # stage (egrep), not psql itself
    [ $? -eq 0 ] || display_error_and_quit
    # do we have any databases?
    if [[ "$DATABASES" == "" ]]; then
        echo " +-- no databases found."
        return 0
    fi
    #
    # if a specific list of databases was requested ($5 and onwards):
    # - every requested database must exist; anything missing is clearly signalled
    # - only the requested databases get dumped
    # BUGFIX: the original dumped *all* server databases even when a list was
    # given, and its `uniq -u` check mis-reported every unrequested database
    # as "missing"
    if [ ! -z ${5+x} ]; then
        REQUESTED_DBS="$( echo "${@:5}" | tr ' ' '\n' | sort )"
        AVAILABLE_DBS="$( echo "$DATABASES" | sort )"
        # requested minus available == missing (comm needs sorted input)
        DATABASES_MISSING="$( comm -23 <( echo "$REQUESTED_DBS" ) <( echo "$AVAILABLE_DBS" ) )"
        if [[ "$DATABASES_MISSING" != "" ]]; then
            display_error_and_quit "Not all databases found; missing: $DATABASES_MISSING"
        fi
        # narrow the working set down to what was requested
        DATABASES="$REQUESTED_DBS"
    fi
    # list of users
    DBUSERS="$( PGPASSWORD="$4" pg_dumpall -h "$2" --globals-only -U "$3" | egrep '(CREATE|ALTER) ROLE' )"
    [ $? -eq 0 ] || display_error_and_quit
    echo " +-- found `echo "$DBUSERS" | egrep "^CREATE" | wc -l` users..."
    # iterate through databases
    for db in ${DATABASES}; do
        echo " +-- working on db: $db"
        # files
        SCHEMA_FILE="$1/schemas/$db.schema.sql"
        DUMP_FILE="$1/dumps/$db.dump.sql"
        echo " +-- schema file : $SCHEMA_FILE"
        echo " +-- dump file : $DUMP_FILE"
        # schema
        echo " +-- schema..."
        PGPASSWORD="$4" pg_dump -h "$2" --create --clean --schema-only -U "$3" "$db" > "$SCHEMA_FILE"
        [ $? -eq 0 ] || display_error_and_quit
        # grants
        echo " +-- grants..."
        echo '-- ------ --' >> "$SCHEMA_FILE"
        echo '-- GRANTS --' >> "$SCHEMA_FILE"
        echo '-- ------ --' >> "$SCHEMA_FILE"
        # collect the role names granted anything in the schema dump...
        USERS_GREP="$( cat "$SCHEMA_FILE" | grep 'GRANT' | sed -r -e 's/^GRANT.+TO ([^;]+);/\1/' | sort | uniq )"
        USERS_GREP="$( echo $USERS_GREP | tr ' ' '|' )"
        # ...and copy their CREATE/ALTER ROLE statements over
        # NOTE(review): role names containing regex metacharacters would break
        # this egrep alternation
        echo "$DBUSERS" | egrep " ($USERS_GREP)[; ]" >> "$SCHEMA_FILE"
        # dump
        echo " +-- dump..."
        # NOTE(review): despite the .sql name this is pg_dump's binary "custom"
        # format (restored with pg_restore, not psql); also note the append
        # (>>) -- repeated runs keep growing this file
        PGPASSWORD="$4" pg_dump -h "$2" --create --data-only --format custom -U "$3" "$db" >> "$DUMP_FILE"
        [ $? -eq 0 ] || display_error_and_quit
        echo " +-- done."
    done
    # we're done
    echo " +-- done."
}
export -f dump_postgres_dbs
#
# elasticsearch
#
# info:
# https://www.elastic.co/guide/en/elasticsearch/guide/master/backing-up-your-cluster.html
# restoring:
# https://www.elastic.co/guide/en/elasticsearch/reference/master/modules-snapshots.html#_restoring_to_a_different_cluster
#
# $1 - targed directory
# $2 - database host
# $3 - optional; location where the temporary backup directory will be created;
# if not set, /tmp will be used
#
# we're cheating here a bit:
# - create a new backup repository
# - make a snapshot
# - copy the snapshot data to the right location, outside the repository directory
# - remove the snapshot
# - remove the backup repository
#
# NOTICE: this will *ONLY* work on a single-node cluster, or on a cluster where all the nodes
# have access to the tmp snapshot directory!
#
# NOTICE: the temporary dump directory has to be accessible both to the elasticsearch server, and to this script
# (for instance, if they're running in separate docker containers,
# this directory has to be volume-mounted in both)
function dump_elasticsearch_dbs {
    # name of the temporary snapshot repository (and of the snapshot's backup run)
    BACKUP_NAME="temp_backup_$( date +%F )"
    # the temporary backup location
    # this location has to be accessible to the elasticsearch server, and to this script
    # (for instance, if they're running in separate docker containers, this directory has to be volume-mounted in both)
    if [ -z ${3+x} ]; then
        BACKUP_TEMP_DIR="$( mktemp -d --tmpdir="/tmp" )"
    else
        BACKUP_TEMP_DIR="$( mktemp -d --tmpdir="$3" )"
    fi
    # bail out early if the temp directory could not be created -- an empty
    # $BACKUP_TEMP_DIR would make the chmod/cp/rm steps below misbehave
    [ -n "$BACKUP_TEMP_DIR" ] && [ -d "$BACKUP_TEMP_DIR" ] || display_error_and_quit 'Error creating the temporary snapshot directory!'
    # let's make sure we have the right perms to create the snapshot
    # (the elasticsearch server process, possibly running as another uid, writes here)
    chmod a+rwX "$BACKUP_TEMP_DIR"
    # clean the vars
    clean_db_vars
    # create the target directory
    echo " +-- setup..."
    echo " +-- directory: $1"
    mkdir -p "$1"
    # create the temporary backup repository
    echo " +-- ES snapshot repository in: $BACKUP_TEMP_DIR"
    curl -X PUT -H "Content-Type: application/json" "http://$2:9200/_snapshot/$BACKUP_NAME" -d "{
\"type\": \"fs\",
\"settings\": {
\"location\": \"$BACKUP_TEMP_DIR\"
}
}"
    echo
    # create a snapshot, blocking until it's done
    echo " +-- dump..."
    curl -X PUT "http://$2:9200/_snapshot/$BACKUP_NAME/snapshot?wait_for_completion=true" || display_error_and_quit 'Error while generating the snapshot!'
    echo
    # copy the backup to a less temporary location
    cp -a "$BACKUP_TEMP_DIR"/* "$1/" || display_error_and_quit 'Error while copying the snapshot data to the target directory!'
    # cleanup: first the snapshot and repository on the ES side...
    echo " +-- cleanup..."
    curl -X DELETE "http://$2:9200/_snapshot/$BACKUP_NAME/snapshot" || display_error_and_quit 'Error while cleaning up the snapshot!'
    echo
    curl -X DELETE "http://$2:9200/_snapshot/$BACKUP_NAME/" || display_error_and_quit 'Error while cleaning up the backup repository!'
    echo
    # ...then the temporary directory on disk
    # BUGFIX: the original ran `rm -rf "$BACKUP_TEMP_DIR/*"` -- with the glob
    # inside the quotes it matched nothing, silently leaving the snapshot data
    # (and the mktemp directory itself) on disk; remove the whole directory
    rm -rf -- "$BACKUP_TEMP_DIR"
    echo " +-- done..."
}
export -f dump_elasticsearch_dbs
\ No newline at end of file
Markdown is supported
0% or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment