Learn to create encrypted Consul backups using GPG encryption with automated snapshot scripts, secure key management, and remote storage integration for production disaster recovery.
Prerequisites
- Active Consul cluster
- Root or sudo access
- Basic knowledge of GPG and backup concepts
- AWS S3 access (optional for remote storage)
What this solves
Consul cluster data requires regular encrypted backups to prevent data loss and ensure disaster recovery capabilities. This tutorial implements GPG encryption for Consul snapshots, automated backup scheduling, and secure storage strategies to protect sensitive service discovery and configuration data in production environments.
Prerequisites and system preparation
Update system packages
Start by updating your package manager to ensure you have the latest security patches and package versions.
sudo apt update && sudo apt upgrade -y
Install required packages
Install GPG, backup utilities, and compression tools needed for encrypted Consul backups.
sudo apt install -y gnupg2 gzip bzip2 curl jq rsync awscli
Verify Consul installation
Confirm that Consul is installed and accessible with the correct permissions for backup operations.
consul version
consul members
consul operator raft list-peers
Configure GPG encryption for backups
Create dedicated backup user
Create a system user specifically for handling Consul backups with minimal privileges.
sudo useradd -r -s /bin/bash -d /opt/consul-backup -m consul-backup
sudo mkdir -p /opt/consul-backup/{keys,scripts,backups,logs}
sudo chown -R consul-backup:consul-backup /opt/consul-backup
sudo chmod 700 /opt/consul-backup
Generate GPG key pair for encryption
Create a dedicated GPG key pair for encrypting Consul backups with strong security settings.
# Generate the key pair non-interactively. With --batch, gpg reads the key
# parameters from stdin; %no-protection creates a passphrase-less key, which
# unattended backup encryption requires on GnuPG 2.1+ (an empty Passphrase
# line is rejected there). Note: batch parameter input works with
# --generate-key, not the interactive --full-generate-key.
sudo -u consul-backup gpg --batch --generate-key <<'EOF'
%echo Generating GPG key for Consul backups
Key-Type: RSA
Key-Length: 4096
Subkey-Type: RSA
Subkey-Length: 4096
Name-Real: Consul Backup Key
Name-Email: consul-backup@localhost
Expire-Date: 2y
%no-protection
%commit
%echo Done
EOF
Export and secure GPG keys
Export the public key for sharing and securely store the private key with proper permissions.
# Export the key pair and record the key ID for the backup scripts.
# Use --with-colons (machine-readable output) to extract the key ID instead
# of grep/cut on human-oriented output, which breaks across gpg versions.
sudo -u consul-backup bash -c '
GPG_KEY_ID=$(gpg --list-secret-keys --with-colons | awk -F: "/^sec:/ { print \$5; exit }")
echo "$GPG_KEY_ID" > /opt/consul-backup/keys/key-id.txt
gpg --armor --export "$GPG_KEY_ID" > /opt/consul-backup/keys/public-key.asc
gpg --armor --export-secret-keys "$GPG_KEY_ID" > /opt/consul-backup/keys/private-key.asc
'
# Private key must never be world-readable; public key and key ID may be.
sudo chmod 600 /opt/consul-backup/keys/private-key.asc
sudo chmod 644 /opt/consul-backup/keys/public-key.asc
sudo chmod 644 /opt/consul-backup/keys/key-id.txt
Create GPG configuration
Configure GPG settings for automated operations without interactive prompts.
# Write the backup user's gpg.conf. The command that creates the file was
# missing from the listing; each option below disables an interactive
# prompt so gpg can run unattended from cron/systemd.
sudo -u consul-backup tee /opt/consul-backup/.gnupg/gpg.conf >/dev/null <<'EOF'
use-agent
pinentry-mode loopback
no-tty
batch
yes
quiet
no-greeting
no-permission-warning
no-secmem-warning
EOF
sudo chown consul-backup:consul-backup /opt/consul-backup/.gnupg/gpg.conf
sudo chmod 600 /opt/consul-backup/.gnupg/gpg.conf
Create automated backup scripts
Create main backup script
Develop a comprehensive script that creates Consul snapshots, compresses them, and encrypts with GPG.
#!/bin/bash
# Consul Encrypted Backup Script
# Creates a Consul snapshot, verifies it, compresses it with gzip,
# encrypts it with GPG, stores it with a JSON metadata file, and prunes
# old backups and logs.
set -euo pipefail

# --- Configuration ---
BACKUP_DIR="/opt/consul-backup/backups"
LOG_DIR="/opt/consul-backup/logs"
KEY_DIR="/opt/consul-backup/keys"
DATE=$(date +"%Y%m%d-%H%M%S")
HOSTNAME=$(hostname -s)
BACKUP_NAME="consul-snapshot-${HOSTNAME}-${DATE}"
LOG_FILE="${LOG_DIR}/backup-${DATE}.log"
RETENTION_DAYS=30
COMPRESSION_LEVEL=9

# Consul connection settings (overridable via environment)
CONSUL_HTTP_ADDR=${CONSUL_HTTP_ADDR:-"127.0.0.1:8500"}
CONSUL_TOKEN_FILE=${CONSUL_TOKEN_FILE:-"/etc/consul/tokens/backup-token"}
CONSUL_CACERT=${CONSUL_CACERT:-"/etc/consul/tls/ca-cert.pem"}
CONSUL_CLIENT_CERT=${CONSUL_CLIENT_CERT:-"/etc/consul/tls/client-cert.pem"}
CONSUL_CLIENT_KEY=${CONSUL_CLIENT_KEY:-"/etc/consul/tls/client-key.pem"}

# GPG key ID used as the encryption recipient
GPG_KEY_ID=$(cat "${KEY_DIR}/key-id.txt")

# Log a timestamped message to stdout and the log file.
log() {
  echo "[$(date '+%Y-%m-%d %H:%M:%S')] $1" | tee -a "${LOG_FILE}"
}

# Log an error and abort.
error_exit() {
  log "ERROR: $1"
  exit 1
}

log "Starting Consul backup process"

# Fail fast if the cluster is unreachable
log "Checking Consul connectivity"
if ! consul members >/dev/null 2>&1; then
  error_exit "Cannot connect to Consul cluster"
fi

# Temporary working directory, removed on any exit path
TEMP_DIR=$(mktemp -d)
trap 'rm -rf "${TEMP_DIR}"' EXIT

# Export an ACL token when a token file is present
if [[ -f "${CONSUL_TOKEN_FILE}" ]]; then
  CONSUL_HTTP_TOKEN=$(cat "${CONSUL_TOKEN_FILE}")
  export CONSUL_HTTP_TOKEN
fi

log "Creating Consul snapshot"
consul snapshot save "${TEMP_DIR}/${BACKUP_NAME}.snap" 2>>"${LOG_FILE}" || error_exit "Failed to create Consul snapshot"

log "Verifying snapshot integrity"
consul snapshot inspect "${TEMP_DIR}/${BACKUP_NAME}.snap" >/dev/null 2>>"${LOG_FILE}" || error_exit "Snapshot verification failed"

# Capture the uncompressed size NOW: gzip replaces the .snap file, so it
# cannot be stat'ed later when the metadata is written (GNU stat first,
# BSD stat as fallback).
ORIGINAL_SIZE=$(stat -c%s "${TEMP_DIR}/${BACKUP_NAME}.snap" 2>/dev/null || stat -f%z "${TEMP_DIR}/${BACKUP_NAME}.snap")

log "Compressing snapshot with gzip level ${COMPRESSION_LEVEL}"
gzip -"${COMPRESSION_LEVEL}" "${TEMP_DIR}/${BACKUP_NAME}.snap" || error_exit "Compression failed"

# --compress-algo 0 disables gpg's own compression (data is already gzipped)
log "Encrypting backup with GPG key ${GPG_KEY_ID}"
gpg --trust-model always --batch --yes --compress-algo 0 \
  --cipher-algo AES256 --digest-algo SHA512 \
  --s2k-digest-algo SHA512 --s2k-cipher-algo AES256 \
  --recipient "${GPG_KEY_ID}" \
  --encrypt "${TEMP_DIR}/${BACKUP_NAME}.snap.gz" \
  2>>"${LOG_FILE}" || error_exit "GPG encryption failed"

log "Moving encrypted backup to storage directory"
mv "${TEMP_DIR}/${BACKUP_NAME}.snap.gz.gpg" "${BACKUP_DIR}/" || error_exit "Failed to move backup file"

log "Creating backup metadata"
ENCRYPTED_SIZE=$(stat -c%s "${BACKUP_DIR}/${BACKUP_NAME}.snap.gz.gpg" 2>/dev/null || stat -f%z "${BACKUP_DIR}/${BACKUP_NAME}.snap.gz.gpg")
cat > "${BACKUP_DIR}/${BACKUP_NAME}.meta" <<EOF
{
  "backup_name": "${BACKUP_NAME}",
  "timestamp": "$(date -u +%Y-%m-%dT%H:%M:%SZ)",
  "hostname": "${HOSTNAME}",
  "gpg_key_id": "${GPG_KEY_ID}",
  "compression": "gzip-${COMPRESSION_LEVEL}",
  "encryption": "GPG-AES256",
  "original_size": "${ORIGINAL_SIZE}",
  "encrypted_size": "${ENCRYPTED_SIZE}",
  "backup_file": "${BACKUP_NAME}.snap.gz.gpg"
}
EOF

# Backup is secret; metadata is harmless
chmod 600 "${BACKUP_DIR}/${BACKUP_NAME}.snap.gz.gpg"
chmod 644 "${BACKUP_DIR}/${BACKUP_NAME}.meta"

log "Cleaning backups older than ${RETENTION_DAYS} days"
find "${BACKUP_DIR}" -name "consul-snapshot-*.gpg" -mtime +${RETENTION_DAYS} -delete 2>>"${LOG_FILE}" || true
find "${BACKUP_DIR}" -name "consul-snapshot-*.meta" -mtime +${RETENTION_DAYS} -delete 2>>"${LOG_FILE}" || true

# Logs rotate faster than backups
find "${LOG_DIR}" -name "backup-*.log" -mtime +7 -delete 2>/dev/null || true

log "Backup completed successfully: ${BACKUP_NAME}.snap.gz.gpg"
log "Backup size: $(du -h "${BACKUP_DIR}/${BACKUP_NAME}.snap.gz.gpg" | cut -f1)"
log "Available backups: $(ls -1 "${BACKUP_DIR}"/*.gpg 2>/dev/null | wc -l)"
exit 0
sudo chown consul-backup:consul-backup /opt/consul-backup/scripts/consul-backup.sh
sudo chmod 750 /opt/consul-backup/scripts/consul-backup.sh
Create backup restoration script
Build a script to decrypt and restore Consul snapshots from encrypted backups.
#!/bin/bash
# Consul Backup Restoration Script
# Decrypts an encrypted snapshot, verifies it, and — after explicit
# confirmation — restores it into the running Consul cluster.
set -euo pipefail

# --- Configuration ---
BACKUP_DIR="/opt/consul-backup/backups"
LOG_DIR="/opt/consul-backup/logs"
DATE=$(date +"%Y%m%d-%H%M%S")
LOG_FILE="${LOG_DIR}/restore-${DATE}.log"

usage() {
  echo "Usage: $0 <backup-file.gpg>"
  echo "Example: $0 consul-snapshot-web01-20241201-143022.snap.gz.gpg"
  exit 1
}

# Exactly one argument: the backup file name, relative to BACKUP_DIR
if [[ $# -ne 1 ]]; then
  usage
fi
BACKUP_FILE="$1"

if [[ ! -f "${BACKUP_DIR}/${BACKUP_FILE}" ]]; then
  echo "Error: Backup file ${BACKUP_DIR}/${BACKUP_FILE} not found"
  exit 1
fi

# Log a timestamped message to stdout and the log file.
log() {
  echo "[$(date '+%Y-%m-%d %H:%M:%S')] $1" | tee -a "${LOG_FILE}"
}

# Log an error and abort.
error_exit() {
  log "ERROR: $1"
  exit 1
}

log "Starting Consul backup restoration from ${BACKUP_FILE}"

# Temporary working directory, removed on any exit path
TEMP_DIR=$(mktemp -d)
trap 'rm -rf "${TEMP_DIR}"' EXIT

log "Decrypting backup file"
gpg --batch --yes --decrypt "${BACKUP_DIR}/${BACKUP_FILE}" > "${TEMP_DIR}/snapshot.snap.gz" 2>>"${LOG_FILE}" || error_exit "GPG decryption failed"

log "Decompressing snapshot"
gunzip "${TEMP_DIR}/snapshot.snap.gz" || error_exit "Decompression failed"

log "Verifying snapshot integrity"
consul snapshot inspect "${TEMP_DIR}/snapshot.snap" >/dev/null 2>>"${LOG_FILE}" || error_exit "Snapshot verification failed"

# Restoring overwrites the cluster's current state, so confirm first
log "Restoring Consul snapshot"
read -p "Are you sure you want to restore this snapshot? This will overwrite current data (y/N): " -n 1 -r
echo
if [[ $REPLY =~ ^[Yy]$ ]]; then
  consul snapshot restore "${TEMP_DIR}/snapshot.snap" 2>>"${LOG_FILE}" || error_exit "Snapshot restoration failed"
  log "Restoration completed successfully"
else
  log "Restoration cancelled by user"
fi
sudo chown consul-backup:consul-backup /opt/consul-backup/scripts/consul-restore.sh
sudo chmod 750 /opt/consul-backup/scripts/consul-restore.sh
Create backup verification script
Implement automated verification to ensure backup integrity and decryption capabilities.
#!/bin/bash
# Consul Backup Verification Script
# Decrypts, decompresses, and inspects the five most recent encrypted
# backups to prove they are actually restorable. Exits non-zero if any
# backup fails verification.
set -euo pipefail

BACKUP_DIR="/opt/consul-backup/backups"
LOG_DIR="/opt/consul-backup/logs"
DATE=$(date +"%Y%m%d-%H%M%S")
LOG_FILE="${LOG_DIR}/verify-${DATE}.log"

# Log a timestamped message to stdout and the log file.
log() {
  echo "[$(date '+%Y-%m-%d %H:%M:%S')] $1" | tee -a "${LOG_FILE}"
}

log "Starting backup verification process"

# Counters are updated with VAR=$((VAR+1)) rather than ((VAR++)): the
# latter returns exit status 1 when the pre-increment value is 0, which
# would abort this script under `set -e` on the very first backup.
BACKUPS_VERIFIED=0
BACKUPS_FAILED=0

# Iterate over the five newest backups via a read loop instead of
# word-splitting $(ls ...). Backup names never contain whitespace, so
# listing by mtime with ls -t is safe here.
while IFS= read -r backup_file; do
  backup_name=$(basename "$backup_file")
  log "Verifying ${backup_name}"
  TEMP_DIR=$(mktemp -d)
  # </dev/null keeps gpg from consuming the loop's stdin
  if gpg --batch --yes --decrypt "$backup_file" > "${TEMP_DIR}/test.snap.gz" 2>/dev/null </dev/null; then
    if gunzip "${TEMP_DIR}/test.snap.gz" 2>/dev/null; then
      if consul snapshot inspect "${TEMP_DIR}/test.snap" >/dev/null 2>&1; then
        log "✓ ${backup_name} verification successful"
        BACKUPS_VERIFIED=$((BACKUPS_VERIFIED + 1))
      else
        log "✗ ${backup_name} snapshot integrity check failed"
        BACKUPS_FAILED=$((BACKUPS_FAILED + 1))
      fi
    else
      log "✗ ${backup_name} decompression failed"
      BACKUPS_FAILED=$((BACKUPS_FAILED + 1))
    fi
  else
    log "✗ ${backup_name} decryption failed"
    BACKUPS_FAILED=$((BACKUPS_FAILED + 1))
  fi
  rm -rf "${TEMP_DIR}"
done < <(ls -t "${BACKUP_DIR}"/*.gpg 2>/dev/null | head -5)

log "Verification complete: ${BACKUPS_VERIFIED} successful, ${BACKUPS_FAILED} failed"
if [[ $BACKUPS_FAILED -gt 0 ]]; then
  exit 1
fi
exit 0
sudo chown consul-backup:consul-backup /opt/consul-backup/scripts/verify-backup.sh
sudo chmod 750 /opt/consul-backup/scripts/verify-backup.sh
Configure secure storage integration
Create S3 sync script for remote storage
Set up secure remote storage synchronization with S3-compatible storage for offsite backup retention.
#!/bin/bash
# S3 Backup Sync Script
# Uploads encrypted backups and their metadata to S3, then prunes remote
# objects older than RETENTION_DAYS.
set -euo pipefail

# --- Configuration - adjust these variables ---
S3_BUCKET="consul-backups-$(hostname -s)"
S3_PREFIX="consul/$(date +%Y/%m)"
BACKUP_DIR="/opt/consul-backup/backups"
LOG_DIR="/opt/consul-backup/logs"
DATE=$(date +"%Y%m%d-%H%M%S")
LOG_FILE="${LOG_DIR}/s3sync-${DATE}.log"
RETENTION_DAYS=90

# Fail fast if the AWS CLI has no usable credentials
if ! aws sts get-caller-identity >/dev/null 2>&1; then
  echo "Error: AWS CLI not configured or credentials invalid"
  exit 1
fi

# Log a timestamped message to stdout and the log file.
log() {
  echo "[$(date '+%Y-%m-%d %H:%M:%S')] $1" | tee -a "${LOG_FILE}"
}

log "Starting S3 backup synchronization"

# Only ship the encrypted backups and metadata, never plaintext
log "Syncing backups to S3: s3://${S3_BUCKET}/${S3_PREFIX}/"
aws s3 sync "${BACKUP_DIR}" "s3://${S3_BUCKET}/${S3_PREFIX}/" \
  --exclude "*" \
  --include "*.gpg" \
  --include "*.meta" \
  --storage-class STANDARD_IA \
  --server-side-encryption AES256 \
  2>>"${LOG_FILE}" || {
  log "S3 sync failed"
  exit 1
}

log "Cleaning S3 backups older than ${RETENTION_DAYS} days"
# NOTE: GNU date syntax; on BSD/macOS use: date -v-${RETENTION_DAYS}d +%Y-%m-%d
CUTOFF_DATE=$(date -d "${RETENTION_DAYS} days ago" +%Y-%m-%d)
# Pass the cutoff via awk -v rather than splicing it into the program
# text; column 1 of `aws s3 ls` is the object date (lexically comparable
# in YYYY-MM-DD form), column 4 is the object key.
aws s3 ls "s3://${S3_BUCKET}/consul/" --recursive | \
  awk -v cutoff="${CUTOFF_DATE}" '$1 <= cutoff {print $4}' | \
  while read -r key; do
    if [[ -n "$key" ]]; then
      log "Deleting old backup: s3://${S3_BUCKET}/${key}"
      aws s3 rm "s3://${S3_BUCKET}/${key}" 2>>"${LOG_FILE}" || true
    fi
  done

log "S3 synchronization completed"
exit 0
sudo chown consul-backup:consul-backup /opt/consul-backup/scripts/sync-to-s3.sh
sudo chmod 750 /opt/consul-backup/scripts/sync-to-s3.sh
Configure AWS credentials for backup user
Set up AWS credentials for the backup user with minimal required permissions for S3 operations.
# Create the backup user's private AWS credentials file. The heredoc that
# writes the file was truncated in the original listing; replace the
# placeholders with the IAM user's real keys, and keep the file 600 —
# it contains a secret.
sudo -u consul-backup mkdir -p /opt/consul-backup/.aws
sudo -u consul-backup tee /opt/consul-backup/.aws/credentials >/dev/null <<'EOF'
[default]
aws_access_key_id = YOUR_ACCESS_KEY_ID
aws_secret_access_key = YOUR_SECRET_ACCESS_KEY
region = us-east-1
EOF
sudo chmod 600 /opt/consul-backup/.aws/credentials
Create S3 bucket and IAM policy
Create the S3 bucket and IAM policy with minimal permissions for secure backup storage.
{
"Version": "2012-10-17",
"Statement": [
{
"Effect": "Allow",
"Action": [
"s3:ListBucket",
"s3:GetBucketLocation"
],
"Resource": "arn:aws:s3:::consul-backups-*"
},
{
"Effect": "Allow",
"Action": [
"s3:GetObject",
"s3:PutObject",
"s3:DeleteObject"
],
"Resource": "arn:aws:s3:::consul-backups-/consul/"
}
]
}
# Create S3 bucket
aws s3 mb s3://consul-backups-$(hostname -s) --region us-east-1
Enable versioning
aws s3api put-bucket-versioning \
--bucket consul-backups-$(hostname -s) \
--versioning-configuration Status=Enabled
Create IAM user and attach policy
aws iam create-user --user-name consul-backup-user
aws iam create-policy --policy-name ConsulBackupPolicy \
--policy-document file://consul-backup-policy.json
aws iam attach-user-policy --user-name consul-backup-user \
--policy-arn arn:aws:iam::ACCOUNT-ID:policy/ConsulBackupPolicy
Set up automated scheduling and monitoring
Configure systemd timer for automated backups
Create systemd service and timer units for reliable automated backup scheduling.
[Unit]
Description=Consul Encrypted Backup Service
After=consul.service
Requires=consul.service
[Service]
Type=oneshot
User=consul-backup
Group=consul-backup
WorkingDirectory=/opt/consul-backup
Environment="PATH=/usr/local/bin:/usr/bin:/bin"
ExecStart=/opt/consul-backup/scripts/consul-backup.sh
ExecStartPost=/opt/consul-backup/scripts/sync-to-s3.sh
PrivateTmp=true
ProtectSystem=strict
ProtectHome=true
ReadWritePaths=/opt/consul-backup
NoNewPrivileges=true
[Unit]
Description=Run Consul Backup Every 6 Hours
Requires=consul-backup.service
[Timer]
OnCalendar=*-*-* 00,06,12,18:00:00
RandomizedDelaySec=300
Persistent=true
[Install]
WantedBy=timers.target
sudo systemctl daemon-reload
sudo systemctl enable consul-backup.timer
sudo systemctl start consul-backup.timer
Configure backup verification timer
Set up daily verification of backup integrity to ensure recovery capabilities.
[Unit]
Description=Consul Backup Verification Service
After=consul.service
[Service]
Type=oneshot
User=consul-backup
Group=consul-backup
WorkingDirectory=/opt/consul-backup
Environment="PATH=/usr/local/bin:/usr/bin:/bin"
ExecStart=/opt/consul-backup/scripts/verify-backup.sh
PrivateTmp=true
ProtectSystem=strict
ProtectHome=true
ReadWritePaths=/opt/consul-backup
NoNewPrivileges=true
[Unit]
Description=Verify Consul Backups Daily
Requires=consul-backup-verify.service
[Timer]
OnCalendar=daily
RandomizedDelaySec=1800
Persistent=true
[Install]
WantedBy=timers.target
sudo systemctl daemon-reload
sudo systemctl enable consul-backup-verify.timer
sudo systemctl start consul-backup-verify.timer
Create monitoring and alerting script
Implement monitoring to alert on backup failures and track backup health metrics.
#!/bin/bash
# Consul Backup Monitoring Script
# Writes a JSON health report and raises an alert (mail, falling back to
# syslog) when backups are stale, too few, or recently failing.
set -euo pipefail

BACKUP_DIR="/opt/consul-backup/backups"
LOG_DIR="/opt/consul-backup/logs"
ALERT_EMAIL="admin@example.com"
MAX_AGE_HOURS=8
MIN_BACKUPS=3

# Number of backups created within the last 24 hours
RECENT_BACKUP=$(find "${BACKUP_DIR}" -name "*.gpg" -mtime -1 | wc -l)
LATEST_BACKUP=$(ls -t "${BACKUP_DIR}"/*.gpg 2>/dev/null | head -1 || echo "")

# Age of the newest backup in whole hours (999 = no backups at all).
# Plain shell arithmetic — bc is not among the packages this tutorial
# installs, so it cannot be relied on here.
if [[ -n "$LATEST_BACKUP" ]]; then
  BACKUP_AGE_HOURS=$(( ($(date +%s) - $(stat -c %Y "$LATEST_BACKUP")) / 3600 ))
else
  BACKUP_AGE_HOURS=999
fi

TOTAL_BACKUPS=$(ls -1 "${BACKUP_DIR}"/*.gpg 2>/dev/null | wc -l)

# How many backup logs contain an ERROR line (capped at 3 newest)
FAILED_BACKUPS=$(grep -l "ERROR:" "${LOG_DIR}"/backup-*.log 2>/dev/null | \
  xargs ls -t 2>/dev/null | head -3 | wc -l)

# Machine-readable status report for external monitoring to scrape
STATUS_FILE="/opt/consul-backup/status.json"
cat > "$STATUS_FILE" <<EOF
{
  "timestamp": "$(date -u +%Y-%m-%dT%H:%M:%SZ)",
  "hostname": "$(hostname)",
  "latest_backup": "$([ -n "$LATEST_BACKUP" ] && basename "$LATEST_BACKUP" || echo "none")",
  "backup_age_hours": $BACKUP_AGE_HOURS,
  "total_backups": $TOTAL_BACKUPS,
  "recent_backups_24h": $RECENT_BACKUP,
  "failed_recent_runs": $FAILED_BACKUPS,
  "status": "$([ "$BACKUP_AGE_HOURS" -le "$MAX_AGE_HOURS" ] && [ "$TOTAL_BACKUPS" -ge "$MIN_BACKUPS" ] && [ "$FAILED_BACKUPS" -eq 0 ] && echo "healthy" || echo "alert")"
}
EOF

# Alert on any unhealthy condition; prefer mail, fall back to syslog
if [[ $BACKUP_AGE_HOURS -gt $MAX_AGE_HOURS ]] || [[ $TOTAL_BACKUPS -lt $MIN_BACKUPS ]] || [[ $FAILED_BACKUPS -gt 0 ]]; then
  ALERT_MSG="Consul Backup Alert - $(hostname)\n"
  ALERT_MSG+="Latest backup age: ${BACKUP_AGE_HOURS} hours\n"
  ALERT_MSG+="Total backups: ${TOTAL_BACKUPS}\n"
  ALERT_MSG+="Recent failures: ${FAILED_BACKUPS}\n"
  echo -e "$ALERT_MSG" | mail -s "Consul Backup Alert - $(hostname)" "$ALERT_EMAIL" 2>/dev/null || \
    logger "Consul Backup Alert: $ALERT_MSG"
fi
sudo chown consul-backup:consul-backup /opt/consul-backup/scripts/backup-monitor.sh
sudo chmod 750 /opt/consul-backup/scripts/backup-monitor.sh
Verify your setup
Test manual backup creation
Run a manual backup to verify the complete encryption and storage workflow.
sudo -u consul-backup /opt/consul-backup/scripts/consul-backup.sh
ls -la /opt/consul-backup/backups/
tail -20 /opt/consul-backup/logs/backup-*.log
Test backup verification
Verify that backups can be decrypted and validated successfully.
sudo -u consul-backup /opt/consul-backup/scripts/verify-backup.sh
tail -10 /opt/consul-backup/logs/verify-*.log
Check systemd timer status
Confirm that automated backup scheduling is active and properly configured.
systemctl status consul-backup.timer
systemctl status consul-backup-verify.timer
systemctl list-timers consul-backup*
Test restoration process
Perform a test restoration to ensure the complete backup and recovery workflow functions correctly.
# List available backups
ls -la /opt/consul-backup/backups/*.gpg
Test restoration (without actually restoring)
echo "n" | sudo -u consul-backup /opt/consul-backup/scripts/consul-restore.sh consul-snapshot-*.gpg
Common issues
| Symptom | Cause | Fix |
|---|---|---|
| GPG decryption fails | Missing or corrupted GPG keys | gpg --list-keys and regenerate keys if needed |
| Backup script permission denied | Incorrect file ownership or permissions | chown consul-backup:consul-backup /opt/consul-backup -R && chmod 750 scripts/*.sh |
| Consul snapshot creation fails | Missing Consul ACL token or connectivity | Check /etc/consul/tokens/backup-token and network connectivity |
| S3 sync fails with access denied | Incorrect AWS credentials or IAM permissions | Verify AWS credentials and IAM policy allows S3 operations |
| Systemd timer not running | Timer not enabled or service unit errors | systemctl enable consul-backup.timer && systemctl start consul-backup.timer |
| Backup verification fails | Corrupted backup files or GPG issues | Check backup file integrity and GPG key availability |
Next steps
- Configure Consul backup and disaster recovery with automated snapshots and restoration
- Setup remote backup storage with S3-compatible encryption and automated retention policies
- Implement Consul ACL security and encryption for production deployments
- Configure backup encryption with GPG and rsync for secure automated backups
- Monitor Consul with Prometheus and Grafana for service discovery observability
Automated install script
Run this to automate the entire setup
#!/usr/bin/env bash
set -euo pipefail

# ANSI color codes used by the console helpers below
RED='\033[0;31m'
GREEN='\033[0;32m'
YELLOW='\033[1;33m'
NC='\033[0m'

# Installation configuration
CONSUL_BACKUP_USER="consul-backup"
CONSUL_BACKUP_DIR="/opt/consul-backup"
GPG_KEY_EMAIL="consul-backup@$(hostname -f)"
GPG_KEY_NAME="Consul Backup Key"

# Informational message to stdout (%b expands the color escapes).
log() {
  printf '%b[INFO]%b %s\n' "$GREEN" "$NC" "$1"
}

# Non-fatal warning to stdout.
warn() {
  printf '%b[WARN]%b %s\n' "$YELLOW" "$NC" "$1"
}

# Fatal error to stderr, then abort the installer.
error() {
  printf '%b[ERROR]%b %s\n' "$RED" "$NC" "$1" >&2
  exit 1
}

# Remove the temporary working directory, if one was created.
cleanup() {
  if [[ -n "${TEMP_DIR:-}" && -d "$TEMP_DIR" ]]; then
    rm -rf "$TEMP_DIR"
  fi
}
trap cleanup ERR EXIT
# Abort unless running as root with a working consul binary on PATH.
check_prerequisites() {
  [[ $EUID -eq 0 ]] || error "This script must be run as root"
  command -v consul >/dev/null 2>&1 || error "Consul is not installed or not in PATH"
  consul version >/dev/null 2>&1 || error "Cannot execute consul command"
}
# Identify the package manager from /etc/os-release and set the
# PKG_MGR / PKG_UPDATE / PKG_INSTALL globals used by later steps.
detect_distro() {
  [[ -f /etc/os-release ]] || error "Cannot detect OS distribution"
  . /etc/os-release
  case "$ID" in
    ubuntu|debian)
      PKG_MGR="apt"
      ;;
    almalinux|rocky|centos|rhel|ol|fedora)
      PKG_MGR="dnf"
      ;;
    amzn)
      PKG_MGR="yum"
      ;;
    *)
      error "Unsupported distribution: $ID"
      ;;
  esac
  # All three managers share the same update/install verb syntax
  PKG_UPDATE="$PKG_MGR update -y"
  PKG_INSTALL="$PKG_MGR install -y"
}
# Refresh package metadata / apply updates via the detected manager.
update_packages() {
  log "Updating system packages..."
  if ! $PKG_UPDATE; then
    error "Failed to update packages"
  fi
}
# Install GPG, compression, and helper tools; append the distro's AWS
# CLI package only when the `aws` binary is not already present.
install_packages() {
  log "Installing required packages..."
  local packages="gnupg2 gzip bzip2 curl jq rsync"
  if command -v aws >/dev/null 2>&1; then
    log "AWS CLI already installed"
  elif [[ "$PKG_MGR" == "apt" ]]; then
    packages="$packages awscli"
  else
    packages="$packages awscli2"
  fi
  # $packages is intentionally unquoted: it is a space-separated list
  $PKG_INSTALL $packages || error "Failed to install packages"
}
# Create the unprivileged backup user and its directory layout.
create_backup_user() {
  log "Creating dedicated backup user..."
  if ! id "$CONSUL_BACKUP_USER" >/dev/null 2>&1; then
    useradd -r -s /bin/bash -d "$CONSUL_BACKUP_DIR" -m "$CONSUL_BACKUP_USER" || error "Failed to create user"
  else
    warn "User $CONSUL_BACKUP_USER already exists"
  fi
  mkdir -p "$CONSUL_BACKUP_DIR"/{keys,scripts,backups,logs}
  chown -R "$CONSUL_BACKUP_USER:$CONSUL_BACKUP_USER" "$CONSUL_BACKUP_DIR"
  # Only the backup user may enter the tree (keys live here)
  chmod 700 "$CONSUL_BACKUP_DIR"
}
# Generate a passphrase-less RSA key pair as the backup user and record
# its key ID under keys/gpg-key-id for the backup script to use.
generate_gpg_key() {
  log "Generating GPG key pair..."
  TEMP_DIR=$(mktemp -d)
  # mktemp -d creates a root-owned 0700 directory, which the sudo'd
  # backup user cannot read. Open it up just enough to read the batch
  # file (it contains no secrets).
  chmod 711 "$TEMP_DIR"
  # %no-protection is required for an unattended key without a
  # passphrase on GnuPG 2.1+; an empty "Passphrase:" line is rejected
  # or triggers a pinentry prompt there.
  cat > "$TEMP_DIR/gpg-batch" <<EOF
%echo Generating GPG key for Consul backups
Key-Type: RSA
Key-Length: 4096
Subkey-Type: RSA
Subkey-Length: 4096
Name-Real: $GPG_KEY_NAME
Name-Email: $GPG_KEY_EMAIL
Expire-Date: 2y
%no-protection
%commit
%echo Done
EOF
  chmod 644 "$TEMP_DIR/gpg-batch"
  sudo -u "$CONSUL_BACKUP_USER" gpg --batch --generate-key "$TEMP_DIR/gpg-batch" || error "GPG key generation failed"
  # Extract the key ID from machine-readable output (field 5 of pub:)
  local key_id
  key_id=$(sudo -u "$CONSUL_BACKUP_USER" gpg --list-keys --with-colons "$GPG_KEY_EMAIL" | awk -F: '/^pub:/ { print $5 }')
  if [[ -z "$key_id" ]]; then
    error "Failed to retrieve GPG key ID"
  fi
  echo "$key_id" | sudo -u "$CONSUL_BACKUP_USER" tee "$CONSUL_BACKUP_DIR/keys/gpg-key-id" >/dev/null
  chmod 600 "$CONSUL_BACKUP_DIR/keys/gpg-key-id"
  log "GPG key generated with ID: $key_id"
}
# Write the automated backup script that cron runs as the backup user.
# The heredoc delimiter is quoted ('EOF'), so everything between the
# markers is written to disk literally — nothing expands at install
# time; all $(...) and ${...} below evaluate when the backup runs.
create_backup_script() {
log "Creating backup script..."
cat > "$CONSUL_BACKUP_DIR/scripts/consul-backup.sh" <<'EOF'
#!/bin/bash
set -euo pipefail
BACKUP_DIR="/opt/consul-backup/backups"
LOG_DIR="/opt/consul-backup/logs"
GPG_KEY_ID=$(cat /opt/consul-backup/keys/gpg-key-id)
CONSUL_TOKEN_FILE="/opt/consul-backup/consul.token"
COMPRESSION_LEVEL=6
RETENTION_DAYS=30
BACKUP_NAME="consul-snapshot-$(date +%Y%m%d-%H%M%S)"
LOG_FILE="${LOG_DIR}/backup-$(date +%Y%m%d).log"
log() {
echo "$(date '+%Y-%m-%d %H:%M:%S') - $1" | tee -a "${LOG_FILE}"
}
error_exit() {
log "ERROR: $1"
exit 1
}
if ! consul members >/dev/null 2>&1; then
error_exit "Cannot connect to Consul cluster"
fi
TEMP_DIR=$(mktemp -d)
trap "rm -rf ${TEMP_DIR}" EXIT
log "Creating Consul snapshot"
if [[ -f "${CONSUL_TOKEN_FILE}" ]]; then
CONSUL_HTTP_TOKEN=$(cat "${CONSUL_TOKEN_FILE}")
export CONSUL_HTTP_TOKEN
fi
consul snapshot save "${TEMP_DIR}/${BACKUP_NAME}.snap" 2>>"${LOG_FILE}" || error_exit "Failed to create Consul snapshot"
log "Verifying snapshot integrity"
consul snapshot inspect "${TEMP_DIR}/${BACKUP_NAME}.snap" >/dev/null 2>>"${LOG_FILE}" || error_exit "Snapshot verification failed"
log "Compressing snapshot with gzip level ${COMPRESSION_LEVEL}"
gzip -${COMPRESSION_LEVEL} "${TEMP_DIR}/${BACKUP_NAME}.snap" || error_exit "Compression failed"
log "Encrypting backup with GPG key ${GPG_KEY_ID}"
gpg --trust-model always --batch --yes --compress-algo 0 \
--cipher-algo AES256 --digest-algo SHA512 \
--s2k-digest-algo SHA512 --s2k-cipher-algo AES256 \
--recipient "${GPG_KEY_ID}" \
--encrypt "${TEMP_DIR}/${BACKUP_NAME}.snap.gz" \
2>>"${LOG_FILE}" || error_exit "GPG encryption failed"
log "Moving encrypted backup to storage directory"
mv "${TEMP_DIR}/${BACKUP_NAME}.snap.gz.gpg" "${BACKUP_DIR}/" || error_exit "Failed to move backup file"
log "Creating backup metadata"
cat > "${BACKUP_DIR}/${BACKUP_NAME}.meta" <<METADATA
{
"timestamp": "$(date -u +%Y-%m-%dT%H:%M:%SZ)",
"backup_name": "${BACKUP_NAME}",
"hostname": "$(hostname)",
"consul_version": "$(consul version | head -n1)",
"gpg_key_id": "${GPG_KEY_ID}",
"compression": "gzip-${COMPRESSION_LEVEL}",
"encryption": "GPG-AES256"
}
METADATA
log "Cleaning old backups (retention: ${RETENTION_DAYS} days)"
find "${BACKUP_DIR}" -name "consul-snapshot-*.gpg" -mtime +${RETENTION_DAYS} -delete 2>>"${LOG_FILE}" || true
find "${BACKUP_DIR}" -name "consul-snapshot-*.meta" -mtime +${RETENTION_DAYS} -delete 2>>"${LOG_FILE}" || true
find "${LOG_DIR}" -name "backup-*.log" -mtime +7 -delete 2>/dev/null || true
BACKUP_SIZE=$(du -h "${BACKUP_DIR}/${BACKUP_NAME}.snap.gz.gpg" | cut -f1)
BACKUP_COUNT=$(ls -1 "${BACKUP_DIR}"/*.gpg 2>/dev/null | wc -l)
log "Backup completed successfully: ${BACKUP_NAME}.snap.gz.gpg"
log "Backup size: ${BACKUP_SIZE}"
log "Available backups: ${BACKUP_COUNT}"
exit 0
EOF
# Owned by the backup user; 750 keeps it executable but not world-readable
chown "$CONSUL_BACKUP_USER:$CONSUL_BACKUP_USER" "$CONSUL_BACKUP_DIR/scripts/consul-backup.sh"
chmod 750 "$CONSUL_BACKUP_DIR/scripts/consul-backup.sh"
}
# Write the interactive restore helper. Unlike the original, the
# generated script now asks for confirmation before restoring — matching
# the manually created restore script earlier in this tutorial, since
# `consul snapshot restore` overwrites the cluster's current state.
create_restore_script() {
  log "Creating restore script..."
  cat > "$CONSUL_BACKUP_DIR/scripts/consul-restore.sh" <<'EOF'
#!/bin/bash
set -euo pipefail
BACKUP_DIR="/opt/consul-backup/backups"
if [[ $# -ne 1 ]]; then
echo "Usage: $0 <backup-file.gpg>"
echo "Available backups:"
ls -1 "${BACKUP_DIR}"/*.gpg 2>/dev/null | xargs -n1 basename || echo "No backups found"
exit 1
fi
BACKUP_FILE="$1"
if [[ ! -f "${BACKUP_DIR}/${BACKUP_FILE}" ]]; then
echo "ERROR: Backup file not found: ${BACKUP_DIR}/${BACKUP_FILE}"
exit 1
fi
TEMP_DIR=$(mktemp -d)
trap "rm -rf ${TEMP_DIR}" EXIT
echo "Decrypting backup..."
gpg --batch --yes --decrypt "${BACKUP_DIR}/${BACKUP_FILE}" > "${TEMP_DIR}/backup.snap.gz"
echo "Decompressing backup..."
gunzip "${TEMP_DIR}/backup.snap.gz"
read -p "Restore this snapshot? This will overwrite current data (y/N): " -n 1 -r
echo
if [[ $REPLY =~ ^[Yy]$ ]]; then
echo "Restoring Consul snapshot..."
consul snapshot restore "${TEMP_DIR}/backup.snap"
echo "Restore completed successfully"
else
echo "Restore cancelled"
exit 1
fi
EOF
  chown "$CONSUL_BACKUP_USER:$CONSUL_BACKUP_USER" "$CONSUL_BACKUP_DIR/scripts/consul-restore.sh"
  chmod 750 "$CONSUL_BACKUP_DIR/scripts/consul-restore.sh"
}
# Install a system crontab entry that runs the backup daily at 02:00
# as the unprivileged backup user.
create_cron_job() {
  log "Creating cron job for automated backups..."
  {
    echo "# Consul encrypted backup - runs daily at 2 AM"
    echo "0 2 * * * $CONSUL_BACKUP_USER /opt/consul-backup/scripts/consul-backup.sh >/dev/null 2>&1"
  } > /etc/cron.d/consul-backup
  # cron.d files must be readable by the cron daemon
  chmod 644 /etc/cron.d/consul-backup
}
# Sanity-check that every artifact produced above exists and that the
# backup user can actually use the generated GPG key.
verify_installation() {
  log "Verifying installation..."
  local key_file="$CONSUL_BACKUP_DIR/keys/gpg-key-id"
  [[ -f "$key_file" ]] || error "GPG key ID file not found"
  [[ -x "$CONSUL_BACKUP_DIR/scripts/consul-backup.sh" ]] || error "Backup script not executable"
  [[ -x "$CONSUL_BACKUP_DIR/scripts/consul-restore.sh" ]] || error "Restore script not executable"
  [[ -f /etc/cron.d/consul-backup ]] || error "Cron job not created"
  local key_id
  key_id=$(cat "$key_file")
  sudo -u "$CONSUL_BACKUP_USER" gpg --list-keys "$key_id" >/dev/null 2>&1 || error "GPG key not accessible"
}
# Orchestrate the full setup, announcing each numbered step before
# running it. Output is identical to the original hand-written sequence.
main() {
  log "Starting Consul backup encryption setup..."
  # "label:function" pairs, executed in order
  local steps=(
    "Checking prerequisites...:check_prerequisites"
    "Detecting distribution...:detect_distro"
    "Updating packages...:update_packages"
    "Installing required packages...:install_packages"
    "Creating backup user...:create_backup_user"
    "Generating GPG key pair...:generate_gpg_key"
    "Creating backup script...:create_backup_script"
    "Creating restore script...:create_restore_script"
    "Setting up automated backups...:create_cron_job"
  )
  local i entry
  for i in "${!steps[@]}"; do
    entry=${steps[$i]}
    echo "[$((i + 1))/${#steps[@]}] ${entry%%:*}"
    "${entry##*:}"
  done
  log "Verifying installation..."
  verify_installation
  log "Setup completed successfully!"
  log "Backup script: $CONSUL_BACKUP_DIR/scripts/consul-backup.sh"
  log "Restore script: $CONSUL_BACKUP_DIR/scripts/consul-restore.sh"
  log "Automated backups: Daily at 2:00 AM"
  warn "Optional: Place Consul token in $CONSUL_BACKUP_DIR/consul.token for ACL-enabled clusters"
}
main "$@"
Review the script before running. Execute with: bash install.sh