Commit: "replaced everything with ws"
==== New file: scripts/backup-database.sh (216 lines) ====
|
||||
#!/bin/bash
# Database Backup Script for Lottery Application
# Creates a MySQL dump from the 'lottery-mysql' container and transfers it
# to the backup VPS via scp.
#
# Usage:
#   ./scripts/backup-database.sh [--keep-local] [--no-compress]
#
# Options:
#   --keep-local    Keep a local copy of the backup (default: delete after transfer)
#   --no-compress   Do not gzip the dump before transfer (default: compress)
#
# Prerequisites:
#   1. SSH key-based authentication to backup VPS (5.45.77.77)
#   2. Database password accessible via /run/secrets/lottery-config.properties
#   3. Docker container 'lottery-mysql' running
#
# Backup location on backup VPS: /raid/backup/acc_260182/

set -euo pipefail

# Configuration
BACKUP_VPS_HOST="5.45.77.77"
BACKUP_VPS_USER="acc_260182"   # User account on backup VPS
BACKUP_VPS_PATH="/raid/backup/acc_260182"
MYSQL_CONTAINER="lottery-mysql"
MYSQL_DATABASE="lottery_db"
SECRET_FILE="/run/secrets/lottery-config.properties"
BACKUP_DIR="/opt/app/backups"
KEEP_LOCAL=false
COMPRESS=true

# Parse command line arguments
while [[ $# -gt 0 ]]; do
    case $1 in
        --keep-local)
            KEEP_LOCAL=true
            shift
            ;;
        --no-compress)
            COMPRESS=false
            shift
            ;;
        *)
            echo "Unknown option: $1"
            echo "Usage: $0 [--keep-local] [--no-compress]"
            exit 1
            ;;
    esac
done

# Colors for output
RED='\033[0;31m'
GREEN='\033[0;32m'
YELLOW='\033[1;33m'
NC='\033[0m' # No Color

# Logging helpers: log/warn -> stdout, error -> stderr
log() {
    echo -e "${GREEN}[$(date +'%Y-%m-%d %H:%M:%S')]${NC} $1"
}

error() {
    echo -e "${RED}[ERROR]${NC} $1" >&2
}

warn() {
    echo -e "${YELLOW}[WARN]${NC} $1"
}

# Stream a mysqldump of ${MYSQL_DATABASE} to stdout.
# The password is passed via MYSQL_PWD in the container environment instead
# of on the command line, so it does not show up in the host's process list
# ('docker exec' argv is visible in ps).
dump_database() {
    docker exec -e MYSQL_PWD="${DB_PASSWORD}" "${MYSQL_CONTAINER}" mysqldump \
        -u root \
        --single-transaction \
        --routines \
        --triggers \
        --events \
        --quick \
        --lock-tables=false \
        "${MYSQL_DATABASE}"
}

# On transfer failure: delete the local dump unless --keep-local was given.
discard_local_backup() {
    if [ "$KEEP_LOCAL" = true ]; then
        warn "Keeping local backup despite transfer failure: ${BACKUP_PATH}"
    else
        rm -f "${BACKUP_PATH}"
    fi
}

# Check if running as root or with sudo
if [ "$EUID" -ne 0 ]; then
    error "This script must be run as root (or with sudo)"
    exit 1
fi

# Load database password from the mounted secret file
if [ ! -f "$SECRET_FILE" ]; then
    error "Secret file not found at $SECRET_FILE"
    exit 1
fi

DB_PASSWORD=$(grep "^SPRING_DATASOURCE_PASSWORD=" "$SECRET_FILE" | cut -d'=' -f2- | sed 's/^[[:space:]]*//;s/[[:space:]]*$//')

if [ -z "$DB_PASSWORD" ]; then
    error "SPRING_DATASOURCE_PASSWORD not found in secret file"
    exit 1
fi

# Check if MySQL container is running
if ! docker ps --format '{{.Names}}' | grep -q "^${MYSQL_CONTAINER}$"; then
    error "MySQL container '${MYSQL_CONTAINER}' is not running"
    exit 1
fi

# Create backup directory if it doesn't exist
mkdir -p "$BACKUP_DIR"

# Generate backup filename with timestamp; add .gz when compressing
TIMESTAMP=$(date +'%Y%m%d_%H%M%S')
BACKUP_FILENAME="lottery_db_backup_${TIMESTAMP}.sql"
if [ "$COMPRESS" = true ]; then
    BACKUP_FILENAME="${BACKUP_FILENAME}.gz"
fi
BACKUP_PATH="${BACKUP_DIR}/${BACKUP_FILENAME}"

log "Starting database backup..."
log "Database: ${MYSQL_DATABASE}"
log "Container: ${MYSQL_CONTAINER}"
log "Backup file: ${BACKUP_FILENAME}"

# Create MySQL dump (compressed on the fly when enabled, to save disk space)
log "Creating MySQL dump..."

if [ "$COMPRESS" = true ]; then
    if dump_database | gzip > "${BACKUP_PATH}"; then
        log "✅ Database dump created and compressed: ${BACKUP_PATH}"
    else
        error "Failed to create database dump"
        exit 1
    fi
else
    if dump_database > "${BACKUP_PATH}"; then
        log "✅ Database dump created: ${BACKUP_PATH}"
    else
        error "Failed to create database dump"
        exit 1
    fi
fi

# Get backup file size
BACKUP_SIZE=$(du -h "${BACKUP_PATH}" | cut -f1)
log "Backup size: ${BACKUP_SIZE}"

# Transfer to backup VPS
log "Transferring backup to backup VPS (${BACKUP_VPS_HOST})..."

# Test SSH connection first (BatchMode avoids hanging on a password prompt)
if ! ssh -o ConnectTimeout=10 -o BatchMode=yes "${BACKUP_VPS_USER}@${BACKUP_VPS_HOST}" "echo 'SSH connection successful'" > /dev/null 2>&1; then
    error "Cannot connect to backup VPS via SSH"
    error "Please ensure:"
    error "  1. SSH key-based authentication is set up"
    error "  2. Backup VPS is accessible from this server"
    error "  3. User '${BACKUP_VPS_USER}' has access to ${BACKUP_VPS_PATH}"
    discard_local_backup
    exit 1
fi

# Create backup directory on remote VPS if it doesn't exist
ssh "${BACKUP_VPS_USER}@${BACKUP_VPS_HOST}" "mkdir -p ${BACKUP_VPS_PATH}"

# Transfer the backup file
if scp "${BACKUP_PATH}" "${BACKUP_VPS_USER}@${BACKUP_VPS_HOST}:${BACKUP_VPS_PATH}/"; then
    log "✅ Backup transferred successfully to ${BACKUP_VPS_HOST}:${BACKUP_VPS_PATH}/${BACKUP_FILENAME}"

    # Verify the remote file exists.
    # BUGFIX: if the remote file was missing, 'du' failed but the remote
    # pipeline still exited 0 via 'cut', so REMOTE_SIZE was EMPTY — which
    # compared unequal to "0" and was falsely reported as verified.
    # Treat both empty and "0" as verification failure.
    REMOTE_SIZE=$(ssh "${BACKUP_VPS_USER}@${BACKUP_VPS_HOST}" "du -h ${BACKUP_VPS_PATH}/${BACKUP_FILENAME} 2>/dev/null | cut -f1" || echo "0")
    if [ -n "$REMOTE_SIZE" ] && [ "$REMOTE_SIZE" != "0" ]; then
        log "✅ Remote backup verified (size: ${REMOTE_SIZE})"
    else
        warn "Could not verify remote backup file"
    fi
else
    error "Failed to transfer backup to backup VPS"
    discard_local_backup
    exit 1
fi

# Clean up local backup if not keeping it
if [ "$KEEP_LOCAL" = false ]; then
    rm -f "${BACKUP_PATH}"
    log "Local backup file removed (transferred successfully)"
fi

# Clean up old backups on remote VPS (keep last 10 days)
log "Cleaning up old backups on remote VPS (keeping last 10 days)..."
ssh "${BACKUP_VPS_USER}@${BACKUP_VPS_HOST}" "find ${BACKUP_VPS_PATH} -name 'lottery_db_backup_*.sql*' -type f -mtime +10 -delete" || warn "Failed to clean up old backups"

# Count remaining backups
BACKUP_COUNT=$(ssh "${BACKUP_VPS_USER}@${BACKUP_VPS_HOST}" "ls -1 ${BACKUP_VPS_PATH}/lottery_db_backup_*.sql* 2>/dev/null | wc -l" || echo "0")
log "Total backups on remote VPS: ${BACKUP_COUNT}"

log "✅ Backup completed successfully!"
log "   Remote location: ${BACKUP_VPS_HOST}:${BACKUP_VPS_PATH}/${BACKUP_FILENAME}"
||||
==== New file: scripts/create-secret-file-from-template.sh (30 lines) ====
|
||||
#!/bin/bash
# Script to create secret file from template.
# Copies the template to the output location with mode 600 so only the owner
# can read/write it, then reminds the operator to fill in real values.
#
# Usage: ./create-secret-file-from-template.sh /path/to/template /path/to/output
#   $1 - template file (default: lottery-config.properties.template)
#   $2 - output file   (default: /run/secrets/lottery-config.properties)

# Strict mode, consistent with the other deployment scripts
set -euo pipefail

TEMPLATE_FILE="${1:-lottery-config.properties.template}"
OUTPUT_FILE="${2:-/run/secrets/lottery-config.properties}"
OUTPUT_DIR=$(dirname "$OUTPUT_FILE")

# Check if template exists
if [ ! -f "$TEMPLATE_FILE" ]; then
    echo "❌ Template file not found: $TEMPLATE_FILE"
    exit 1
fi

# Create output directory if it doesn't exist
mkdir -p "$OUTPUT_DIR"

# Copy template to output
cp "$TEMPLATE_FILE" "$OUTPUT_FILE"

# Set secure permissions: read/write for owner only, no access for group/others.
# (The previous comment said "read-only for owner", but 0600 is rw for owner —
# write access is needed so the operator can edit in the real values.)
chmod 600 "$OUTPUT_FILE"

echo "✅ Secret file created at $OUTPUT_FILE"
echo "⚠️ IMPORTANT: Edit this file and replace all placeholder values with your actual configuration!"
echo "⚠️ After editing, ensure permissions are secure: chmod 600 $OUTPUT_FILE"
||||
|
||||
|
||||
@@ -3,7 +3,7 @@
|
||||
# Create secret file from environment variables for testing ConfigLoader
|
||||
# This simulates the mounted secret file approach used in Inferno
|
||||
|
||||
SECRET_FILE="/run/secrets/honey-config.properties"
|
||||
SECRET_FILE="/run/secrets/lottery-config.properties"
|
||||
SECRET_DIR="/run/secrets"
|
||||
|
||||
# Create directory if it doesn't exist
|
||||
@@ -25,3 +25,4 @@ chmod 644 "$SECRET_FILE"
|
||||
|
||||
echo "✅ Secret file created at $SECRET_FILE from environment variables"
|
||||
|
||||
|
||||
|
||||
==== New file: scripts/diagnose-backup-permissions.sh (227 lines) ====
|
||||
#!/bin/bash
# Diagnostic script for backup-database.sh permission issues.
# Run this on your VPS to identify the root cause.
#
# Walks through 14 checks (existence, permissions, shebang, interpreter,
# line endings, mount flags, SELinux, parent-directory perms, syntax,
# readability, cron, required commands, secret file) and prints a summary
# with concrete fix commands. Deliberately has NO 'set -e': every check
# should run even when an earlier one fails.

SCRIPT="/opt/app/backend/lottery-be/scripts/backup-database.sh"
RED='\033[0;31m'
GREEN='\033[0;32m'
YELLOW='\033[1;33m'
NC='\033[0m'

echo "=========================================="
echo "Backup Script Permission Diagnostic"
echo "=========================================="
echo ""

# 1. File exists
echo "1. Checking if file exists..."
if [ -f "$SCRIPT" ]; then
    echo -e "   ${GREEN}✅ File exists${NC}"
else
    echo -e "   ${RED}❌ File NOT found at: $SCRIPT${NC}"
    echo "   Please verify the path."
    exit 1
fi
echo ""

# 2. File permissions
echo "2. File permissions:"
ls -la "$SCRIPT"
echo ""

# 3. Is executable
echo "3. Is file executable?"
if [ -x "$SCRIPT" ]; then
    echo -e "   ${GREEN}✅ File is executable${NC}"
else
    echo -e "   ${RED}❌ File is NOT executable${NC}"
    echo "   Fix: chmod +x $SCRIPT"
fi
echo ""

# 4. Shebang line
echo "4. Shebang line (first line):"
SHEBANG=$(head -1 "$SCRIPT")
echo "   $SHEBANG"
if [[ "$SHEBANG" == "#!/bin/bash" ]] || [[ "$SHEBANG" == "#!/usr/bin/bash" ]]; then
    echo -e "   ${GREEN}✅ Shebang looks correct${NC}"
else
    echo -e "   ${YELLOW}⚠️ Unexpected shebang${NC}"
fi
echo ""

# 5. Bash exists
echo "5. Checking if bash interpreter exists:"
if [ -f /bin/bash ]; then
    echo -e "   ${GREEN}✅ /bin/bash exists${NC}"
    /bin/bash --version | head -1
elif [ -f /usr/bin/bash ]; then
    echo -e "   ${GREEN}✅ /usr/bin/bash exists${NC}"
    /usr/bin/bash --version | head -1
else
    echo -e "   ${RED}❌ bash not found in /bin/bash or /usr/bin/bash${NC}"
    # 'command -v' replaces the deprecated, non-standard 'which'
    echo "   Found at: $(command -v bash 2>/dev/null || echo 'NOT FOUND')"
fi
echo ""

# 6. Line endings
echo "6. Checking line endings:"
FILE_TYPE=$(file "$SCRIPT")
echo "   $FILE_TYPE"
if echo "$FILE_TYPE" | grep -q "CRLF"; then
    echo -e "   ${RED}❌ File has Windows line endings (CRLF)${NC}"
    echo "   Fix: dos2unix $SCRIPT"
    echo "   Or: sed -i 's/\r$//' $SCRIPT"
elif echo "$FILE_TYPE" | grep -q "ASCII text"; then
    echo -e "   ${GREEN}✅ Line endings look correct (LF)${NC}"
else
    echo -e "   ${YELLOW}⚠️ Could not determine line endings${NC}"
fi
echo ""

# 7. Mount options
echo "7. Checking filesystem mount options:"
MOUNT_INFO=$(mount | grep -E "(/opt|/app)" || echo "Not a separate mount")
echo "   $MOUNT_INFO"
if echo "$MOUNT_INFO" | grep -q "noexec"; then
    echo -e "   ${RED}❌ Filesystem mounted with 'noexec' flag${NC}"
    echo "   This prevents script execution!"
    echo "   Fix: Remove 'noexec' from /etc/fstab and remount"
else
    echo -e "   ${GREEN}✅ No 'noexec' flag detected${NC}"
fi
echo ""

# 8. SELinux
echo "8. Checking SELinux:"
if command -v getenforce &> /dev/null; then
    SELINUX_STATUS=$(getenforce 2>/dev/null)
    echo "   Status: $SELINUX_STATUS"
    if [ "$SELINUX_STATUS" = "Enforcing" ]; then
        echo -e "   ${YELLOW}⚠️ SELinux is enforcing - may block execution${NC}"
        echo "   Check context: ls -Z $SCRIPT"
    else
        echo -e "   ${GREEN}✅ SELinux not blocking (or disabled)${NC}"
    fi
else
    echo -e "   ${GREEN}✅ SELinux not installed${NC}"
fi
echo ""

# 9. Directory permissions
echo "9. Parent directory permissions:"
DIR=$(dirname "$SCRIPT")
ls -ld "$DIR"
if [ -x "$DIR" ]; then
    echo -e "   ${GREEN}✅ Directory is executable${NC}"
else
    echo -e "   ${RED}❌ Directory is NOT executable${NC}"
    echo "   Fix: chmod +x $DIR"
fi
echo ""

# 10. Syntax check
echo "10. Checking script syntax:"
if bash -n "$SCRIPT" 2>&1; then
    echo -e "   ${GREEN}✅ Syntax is valid${NC}"
else
    echo -e "   ${RED}❌ Syntax errors found${NC}"
    bash -n "$SCRIPT"
fi
echo ""

# 11. Test execution
echo "11. Testing script execution (dry run):"
echo "   Attempting to read first 10 lines..."
if head -10 "$SCRIPT" > /dev/null 2>&1; then
    echo -e "   ${GREEN}✅ Can read script${NC}"
else
    echo -e "   ${RED}❌ Cannot read script${NC}"
fi
echo ""

# 12. Cron job check
echo "12. Checking cron configuration:"
if [ "$EUID" -eq 0 ]; then
    echo "   Root's crontab:"
    crontab -l 2>/dev/null | grep -i backup || echo "   (No backup cron job found in root's crontab)"
    echo ""
    echo "   To check cron job, run: sudo crontab -l"
else
    echo "   (Run as root to check crontab: sudo crontab -l)"
fi
echo ""

# 13. Environment check
echo "13. Checking required commands:"
REQUIRED_COMMANDS=("docker" "ssh" "gzip" "bash")
for cmd in "${REQUIRED_COMMANDS[@]}"; do
    if command -v "$cmd" &> /dev/null; then
        # 'command -v' again — 'which' is non-standard and deprecated
        CMD_PATH=$(command -v "$cmd")
        echo -e "   ${GREEN}✅ $cmd${NC} found at: $CMD_PATH"
    else
        echo -e "   ${RED}❌ $cmd${NC} NOT found in PATH"
    fi
done
echo ""

# 14. Secret file check
echo "14. Checking secret file:"
SECRET_FILE="/run/secrets/lottery-config.properties"
if [ -f "$SECRET_FILE" ]; then
    echo -e "   ${GREEN}✅ Secret file exists${NC}"
    if [ -r "$SECRET_FILE" ]; then
        echo -e "   ${GREEN}✅ Secret file is readable${NC}"
    else
        echo -e "   ${RED}❌ Secret file is NOT readable${NC}"
    fi
else
    echo -e "   ${YELLOW}⚠️ Secret file not found (script will fail at runtime)${NC}"
fi
echo ""

# Summary
echo "=========================================="
echo "Summary & Recommendations"
echo "=========================================="

ISSUES=0

if [ ! -x "$SCRIPT" ]; then
    echo -e "${RED}❌ Issue: File is not executable${NC}"
    echo "   Fix: chmod +x $SCRIPT"
    ISSUES=$((ISSUES + 1))
fi

if file "$SCRIPT" | grep -q "CRLF"; then
    echo -e "${RED}❌ Issue: Windows line endings detected${NC}"
    echo "   Fix: dos2unix $SCRIPT (or: sed -i 's/\r$//' $SCRIPT)"
    ISSUES=$((ISSUES + 1))
fi

if mount | grep -E "(/opt|/app)" | grep -q "noexec"; then
    echo -e "${RED}❌ Issue: Filesystem mounted with noexec${NC}"
    echo "   Fix: Remove noexec from /etc/fstab and remount"
    ISSUES=$((ISSUES + 1))
fi

if [ "$ISSUES" -eq 0 ]; then
    echo -e "${GREEN}✅ No obvious issues found${NC}"
    echo ""
    echo "If cron still fails, try:"
    echo "  1. Update cron to use bash explicitly:"
    echo "     0 2 * * * /bin/bash $SCRIPT >> /opt/app/logs/backup.log 2>&1"
    echo ""
    echo "  2. Check cron logs:"
    echo "     sudo journalctl -u cron | tail -50"
    echo ""
    echo "  3. Test manual execution:"
    echo "     sudo $SCRIPT --keep-local"
else
    echo ""
    echo -e "${YELLOW}Found $ISSUES issue(s) that need to be fixed.${NC}"
fi

echo ""
echo "=========================================="
||||
==== New file: scripts/load-db-password.sh (38 lines) ====
|
||||
#!/bin/bash
# Load the database password out of the mounted secret file and export it as
# both DB_PASSWORD and DB_ROOT_PASSWORD, guaranteeing they always match
# SPRING_DATASOURCE_PASSWORD.
#
# Must be sourced so the exports land in the caller's shell:
#   source ./load-db-password.sh

SECRET_FILE="/run/secrets/lottery-config.properties"

if [[ ! -f "$SECRET_FILE" ]]; then
    echo "❌ Error: Secret file not found at $SECRET_FILE"
    echo "   Please create the secret file first (see deployment guide Step 3.3)"
    # 'return' works when sourced; fall back to 'exit' when executed directly
    return 1 2>/dev/null || exit 1
fi

# Pull SPRING_DATASOURCE_PASSWORD out of the properties file, trimming any
# surrounding whitespace from the value
DB_PASSWORD=$(grep "^SPRING_DATASOURCE_PASSWORD=" "$SECRET_FILE" | cut -d'=' -f2- | sed -e 's/^[[:space:]]*//' -e 's/[[:space:]]*$//')

if [[ -z "$DB_PASSWORD" ]]; then
    echo "❌ Error: SPRING_DATASOURCE_PASSWORD not found in secret file"
    echo "   Please ensure the secret file contains: SPRING_DATASOURCE_PASSWORD=your_password"
    return 1 2>/dev/null || exit 1
fi

# MySQL consumes the same value under both names
export DB_PASSWORD
export DB_ROOT_PASSWORD="$DB_PASSWORD"

# Optionally pick up PMA_ABSOLUTE_URI (phpMyAdmin path protection), trimming
# whitespace plus any surrounding double or single quotes
PMA_ABSOLUTE_URI=$(grep "^PMA_ABSOLUTE_URI=" "$SECRET_FILE" | cut -d'=' -f2- | sed -e 's/^[[:space:]]*//' -e 's/[[:space:]]*$//' -e 's/^"//' -e 's/"$//' -e "s/^'//" -e "s/'\$//")
if [[ -n "$PMA_ABSOLUTE_URI" ]]; then
    export PMA_ABSOLUTE_URI
    echo "✅ PMA_ABSOLUTE_URI loaded from secret file"
fi

echo "✅ Database password loaded from secret file"
echo "   DB_PASSWORD and DB_ROOT_PASSWORD are now set (matching SPRING_DATASOURCE_PASSWORD)"
||||
|
||||
|
||||
==== New file: scripts/restore-database.sh (183 lines) ====
|
||||
#!/bin/bash
# Database Restore Script for Lottery Application
# Restores the MySQL database from a local or remote backup file.
#
# Usage:
#   ./scripts/restore-database.sh <backup-file>
#
# Examples:
#   # Restore from local file
#   ./scripts/restore-database.sh /opt/app/backups/lottery_db_backup_20240101_120000.sql.gz
#
#   # Restore from backup VPS (host:/path syntax, fetched via scp)
#   ./scripts/restore-database.sh 5.45.77.77:/raid/backup/acc_260182/lottery_db_backup_20240101_120000.sql.gz
#
# Prerequisites:
#   1. Database password accessible via /run/secrets/lottery-config.properties
#   2. Docker container 'lottery-mysql' running
#   3. Database will be DROPPED and RECREATED (all data will be lost!)

set -euo pipefail

# Configuration
MYSQL_CONTAINER="lottery-mysql"
MYSQL_DATABASE="lottery_db"
SECRET_FILE="/run/secrets/lottery-config.properties"
BACKUP_VPS_USER="acc_260182"   # User account on backup VPS

# Colors for output
RED='\033[0;31m'
GREEN='\033[0;32m'
YELLOW='\033[1;33m'
NC='\033[0m' # No Color

# Logging helpers: log/warn -> stdout, error -> stderr
log() {
    echo -e "${GREEN}[$(date +'%Y-%m-%d %H:%M:%S')]${NC} $1"
}

error() {
    echo -e "${RED}[ERROR]${NC} $1" >&2
}

warn() {
    echo -e "${YELLOW}[WARN]${NC} $1"
}

# Run the mysql client inside the container. The password is passed through
# the MYSQL_PWD environment variable so it does not appear in the host's
# process list. mysql_exec_stdin additionally attaches stdin for piped dumps.
mysql_exec() {
    docker exec -e MYSQL_PWD="${DB_PASSWORD}" "${MYSQL_CONTAINER}" mysql -u root "$@"
}

mysql_exec_stdin() {
    docker exec -i -e MYSQL_PWD="${DB_PASSWORD}" "${MYSQL_CONTAINER}" mysql -u root "$@"
}

# Check arguments
if [ $# -eq 0 ]; then
    error "No backup file specified"
    echo "Usage: $0 <backup-file>"
    echo ""
    echo "Examples:"
    echo "  $0 /opt/app/backups/lottery_db_backup_20240101_120000.sql.gz"
    echo "  $0 5.45.77.77:/raid/backup/acc_260182/lottery_db_backup_20240101_120000.sql.gz"
    exit 1
fi

BACKUP_SOURCE="$1"

# Check if running as root or with sudo
if [ "$EUID" -ne 0 ]; then
    error "This script must be run as root (or with sudo)"
    exit 1
fi

# Load database password
if [ ! -f "$SECRET_FILE" ]; then
    error "Secret file not found at $SECRET_FILE"
    exit 1
fi

DB_PASSWORD=$(grep "^SPRING_DATASOURCE_PASSWORD=" "$SECRET_FILE" | cut -d'=' -f2- | sed 's/^[[:space:]]*//;s/[[:space:]]*$//')

if [ -z "$DB_PASSWORD" ]; then
    error "SPRING_DATASOURCE_PASSWORD not found in secret file"
    exit 1
fi

# Check if MySQL container is running
if ! docker ps --format '{{.Names}}' | grep -q "^${MYSQL_CONTAINER}$"; then
    error "MySQL container '${MYSQL_CONTAINER}' is not running"
    exit 1
fi

# Determine if backup is remote or local
TEMP_BACKUP="/tmp/restore_backup_$$"
BACKUP_IS_COMPRESSED=false

# Always remove the temp copy — on success, on error (set -e) and on cancel
cleanup() { rm -f "${TEMP_BACKUP}"; }
trap cleanup EXIT

if [[ "$BACKUP_SOURCE" == *":"* ]]; then
    # Remote backup (format: host:/path/to/file).
    # BUGFIX: the old parsing used word-splitting on ':' replaced by spaces
    # (HOST_PATH=(${BACKUP_SOURCE//:/ })), which broke on paths containing
    # spaces or more than one colon. Parameter expansion splits exactly once.
    log "Detected remote backup: ${BACKUP_SOURCE}"
    REMOTE_HOST="${BACKUP_SOURCE%%:*}"
    REMOTE_PATH="${BACKUP_SOURCE#*:}"

    log "Downloading backup from ${REMOTE_HOST}..."
    if scp "${REMOTE_HOST}:${REMOTE_PATH}" "${TEMP_BACKUP}"; then
        log "✅ Backup downloaded successfully"
    else
        error "Failed to download backup from remote VPS"
        exit 1
    fi
else
    # Local backup
    if [ ! -f "$BACKUP_SOURCE" ]; then
        error "Backup file not found: ${BACKUP_SOURCE}"
        exit 1
    fi
    log "Using local backup: ${BACKUP_SOURCE}"
    cp "$BACKUP_SOURCE" "${TEMP_BACKUP}"
fi

# Detect compression by file magic. (The old additional name check
# "$TEMP_BACKUP" == *.gz could never match — the temp file has no extension.)
if file "$TEMP_BACKUP" | grep -q "gzip compressed"; then
    BACKUP_IS_COMPRESSED=true
    log "Backup is compressed (gzip)"
fi

# Get backup file size
BACKUP_SIZE=$(du -h "${TEMP_BACKUP}" | cut -f1)
log "Backup size: ${BACKUP_SIZE}"

# WARNING: This will destroy all existing data!
warn "⚠️ WARNING: This will DROP and RECREATE the database '${MYSQL_DATABASE}'"
warn "⚠️ ALL EXISTING DATA WILL BE LOST!"
echo ""
read -p "Are you sure you want to continue? Type 'YES' to confirm: " CONFIRM

if [ "$CONFIRM" != "YES" ]; then
    log "Restore cancelled by user"
    exit 0
fi

log "Starting database restore..."

# Drop and recreate database
log "Dropping existing database (if exists)..."
mysql_exec -e "DROP DATABASE IF EXISTS ${MYSQL_DATABASE};" || true

log "Creating fresh database..."
mysql_exec -e "CREATE DATABASE ${MYSQL_DATABASE} CHARACTER SET utf8mb4 COLLATE utf8mb4_unicode_ci;"

# Restore database
log "Restoring database from backup..."

if [ "$BACKUP_IS_COMPRESSED" = true ]; then
    # Restore from compressed backup
    if gunzip -c "${TEMP_BACKUP}" | mysql_exec_stdin "${MYSQL_DATABASE}"; then
        log "✅ Database restored successfully from compressed backup"
    else
        error "Failed to restore database"
        exit 1
    fi
else
    # Restore from uncompressed backup
    if mysql_exec_stdin "${MYSQL_DATABASE}" < "${TEMP_BACKUP}"; then
        log "✅ Database restored successfully"
    else
        error "Failed to restore database"
        exit 1
    fi
fi

# Verify restore
log "Verifying restore..."
TABLE_COUNT=$(mysql_exec -N -e "SELECT COUNT(*) FROM information_schema.tables WHERE table_schema = '${MYSQL_DATABASE}';" 2>/dev/null || echo "0")

# Guard against an empty/non-numeric result so [ -gt ] cannot abort the
# script under 'set -e'
if [ "${TABLE_COUNT:-0}" -gt 0 ] 2>/dev/null; then
    log "✅ Restore verified: ${TABLE_COUNT} tables found in database"
else
    warn "⚠️ Warning: No tables found in database after restore"
fi

log "✅ Database restore completed!"
warn "⚠️ Remember to restart the backend container if it's running:"
warn "   docker restart lottery-backend"
||||
==== New file: scripts/rolling-update.sh (628 lines; truncated in this view) ====
|
||||
#!/bin/bash
# Rolling Update Deployment Script
#
# Performs a zero-downtime deployment:
#   1. Build the new backend image
#   2. Start the new backend container on port 8082
#   3. Health-check the new container
#   4. Point Nginx at the new container
#   5. Reload Nginx (no dropped connections)
#   6. Stop the old container after a grace period

set -euo pipefail

# ANSI color codes — defined first so the config detection below can log
RED='\033[0;31m'
GREEN='\033[0;32m'
YELLOW='\033[1;33m'
BLUE='\033[0;34m'
NC='\033[0m'

# Logging helpers (defined early). printf '%b' expands the escape sequences
# exactly like 'echo -e'. error() writes to stderr; the rest to stdout.
log()   { printf '%b\n' "${GREEN}[$(date +'%Y-%m-%d %H:%M:%S')]${NC} $1"; }

error() { printf '%b\n' "${RED}[ERROR]${NC} $1" >&2; }

warn()  { printf '%b\n' "${YELLOW}[WARN]${NC} $1"; }

info()  { printf '%b\n' "${BLUE}[INFO]${NC} $1"; }
|
||||
# Configuration
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
PROJECT_DIR="$(cd "${SCRIPT_DIR}/.." && pwd)"
COMPOSE_FILE="${PROJECT_DIR}/docker-compose.prod.yml"

# Detect Nginx config file (try common locations)
# Priority: sites-enabled (what Nginx actually loads) > conf.d > custom paths
# Override autodetection by exporting NGINX_CONF before running the script.
NGINX_CONF="${NGINX_CONF:-}"
if [ -z "$NGINX_CONF" ]; then
    if [ -f "/etc/nginx/sites-enabled/win-spin.live" ]; then
        NGINX_CONF="/etc/nginx/sites-enabled/win-spin.live"
        log "Using Nginx config: $NGINX_CONF (sites-enabled - active config)"
    elif [ -f "/etc/nginx/sites-enabled/win-spin.live.conf" ]; then
        NGINX_CONF="/etc/nginx/sites-enabled/win-spin.live.conf"
        log "Using Nginx config: $NGINX_CONF (sites-enabled - active config)"
    elif [ -f "/etc/nginx/conf.d/lottery.conf" ]; then
        NGINX_CONF="/etc/nginx/conf.d/lottery.conf"
        log "Using Nginx config: $NGINX_CONF (conf.d)"
    elif [ -f "/opt/app/nginx/win-spin.live.conf" ]; then
        warn "Found config at /opt/app/nginx/win-spin.live.conf"
        warn "Checking if it's symlinked to /etc/nginx/sites-enabled/..."
        if [ -L "/etc/nginx/sites-enabled/win-spin.live" ] || [ -L "/etc/nginx/sites-enabled/win-spin.live.conf" ]; then
            # Find the actual symlink target.
            # BUGFIX: this previously used 'local target=...' at top level;
            # 'local' is only valid inside a function, so bash raised an error
            # and, under 'set -e', aborted the script on exactly this path.
            # Plain assignment instead; '|| true' keeps 'set -e' happy when
            # both readlink calls fail (target then stays empty).
            target=$(readlink -f /etc/nginx/sites-enabled/win-spin.live 2>/dev/null || readlink -f /etc/nginx/sites-enabled/win-spin.live.conf 2>/dev/null) || true
            if [ -n "$target" ]; then
                NGINX_CONF="$target"
                log "Using Nginx config: $NGINX_CONF (symlink target)"
            else
                NGINX_CONF="/opt/app/nginx/win-spin.live.conf"
                warn "Using custom path - will update this file, but you may need to copy to sites-enabled"
            fi
        else
            NGINX_CONF="/opt/app/nginx/win-spin.live.conf"
            warn "Using custom path - will update this file, but you may need to copy to sites-enabled"
        fi
    else
        error "Cannot find Nginx config file."
        error "Searched:"
        error "  - /etc/nginx/sites-enabled/win-spin.live"
        error "  - /etc/nginx/sites-enabled/win-spin.live.conf"
        error "  - /etc/nginx/conf.d/lottery.conf"
        error "  - /opt/app/nginx/win-spin.live.conf"
        error ""
        error "Please set NGINX_CONF environment variable with the correct path."
        exit 1
    fi
else
    log "Using Nginx config: $NGINX_CONF (from NGINX_CONF environment variable)"
fi

# Create backup in /tmp to avoid nginx including it (sites-enabled/* includes all files)
NGINX_CONF_BACKUP="/tmp/nginx-backup-$(basename "$NGINX_CONF").$(date +%Y%m%d_%H%M%S)"
|
||||
# Ports for backends (will be swapped dynamically)
PRIMARY_PORT=8080
STANDBY_PORT=8082

# Detect which backend container/port Nginx currently routes traffic to by
# parsing the "upstream backend" block in $NGINX_CONF.
# Sets the globals:
#   ACTIVE_PORT / STANDBY_PORT       - ports after detection
#   ACTIVE_CONTAINER / STANDBY_CONTAINER - matching container names
#   PRIMARY_PORT                     - mirrors ACTIVE_PORT
#   HEALTH_CHECK_URL                 - readiness probe on the STANDBY port
#     (the standby port is where the NEW container will come up)
detect_active_backend() {
    # Check which port Nginx is currently using in upstream block
    # Look for server line that is NOT marked as backup
    # NOTE(review): -A 10 assumes the upstream block spans at most ~10 lines
    # and that it starts exactly with "upstream backend {" — confirm against
    # the actual nginx config layout.
    local active_port_line=$(grep -A 10 "^upstream backend {" "$NGINX_CONF" | grep "server 127\.0\.0\.1:" | grep -v "backup" | head -1)

    if echo "$active_port_line" | grep -q "127\.0\.0\.1:8082"; then
        # Port 8082 is active (not backup)
        ACTIVE_PORT=8082
        STANDBY_PORT=8080
        ACTIVE_CONTAINER="lottery-backend-new"
        STANDBY_CONTAINER="lottery-backend"
        log "Detected: Port 8082 is currently active"
    else
        # Port 8080 is active (default or only one present)
        ACTIVE_PORT=8080
        STANDBY_PORT=8082
        ACTIVE_CONTAINER="lottery-backend"
        STANDBY_CONTAINER="lottery-backend-new"
        log "Detected: Port 8080 is currently active"
    fi

    PRIMARY_PORT=$ACTIVE_PORT
    # Health-check the standby side: that is where the new container starts
    HEALTH_CHECK_URL="http://127.0.0.1:${STANDBY_PORT}/actuator/health/readiness"
}
||||
|
||||
# Health-check tuning: up to 60 polls, 2s apart => 120s max for Spring Boot startup
HEALTH_CHECK_RETRIES=60
HEALTH_CHECK_INTERVAL=2
GRACE_PERIOD=10

# Honour KEEP_FAILED_CONTAINER=true (preserved across sudo): keep a failed
# container around for post-mortem debugging instead of removing it on rollback.
if [[ "${KEEP_FAILED_CONTAINER:-}" == "true" ]]; then
    export SCRIPT_KEEP_FAILED_CONTAINER="true"
    log "KEEP_FAILED_CONTAINER=true - failed containers will be kept for debugging"
fi

# Pick the Compose entry point: the 'docker compose' plugin on newer Docker,
# the standalone 'docker-compose' binary on older installs.
DOCKER_COMPOSE_CMD=""
if docker compose version > /dev/null 2>&1; then
    DOCKER_COMPOSE_CMD="docker compose"
elif command -v docker-compose > /dev/null 2>&1; then
    DOCKER_COMPOSE_CMD="docker-compose"
else
    error "Neither 'docker compose' nor 'docker-compose' is available"
    exit 1
fi
||||
|
||||
#######################################
# Check prerequisites for the rolling update.
# Verifies root privileges, nginx config presence, DB password availability,
# then detects and validates the currently active backend container.
# Globals:
#   NGINX_CONF, DOCKER_COMPOSE_CMD, COMPOSE_FILE, SCRIPT_DIR (read)
#   DB_ROOT_PASSWORD (read; may be loaded via load-db-password.sh)
#   ACTIVE_CONTAINER, ACTIVE_PORT, STANDBY_CONTAINER, STANDBY_PORT
#     (written via detect_active_backend)
# Returns: 0 on success; exits 1 on any failed check.
#######################################
check_prerequisites() {
    log "Checking prerequisites..."

    # Check if running as root (required for nginx reload and docker)
    if [ "$EUID" -ne 0 ]; then
        error "This script must be run as root (or with sudo)"
        exit 1
    fi

    # Check if docker compose is available (already detected above)
    log "Using Docker Compose command: $DOCKER_COMPOSE_CMD"

    # Check if Nginx config exists
    if [ ! -f "$NGINX_CONF" ]; then
        error "Nginx config not found at $NGINX_CONF"
        exit 1
    fi

    # Check if DB_ROOT_PASSWORD is set
    if [ -z "${DB_ROOT_PASSWORD:-}" ]; then
        warn "DB_ROOT_PASSWORD not set, attempting to load from secret file..."
        if [ -f "${SCRIPT_DIR}/load-db-password.sh" ]; then
            source "${SCRIPT_DIR}/load-db-password.sh"
        else
            error "Cannot load DB_ROOT_PASSWORD. Please set it or run: source scripts/load-db-password.sh"
            exit 1
        fi
    fi

    # Detect which backend is currently active
    detect_active_backend

    # Check if active backend is running
    if ! docker ps --format '{{.Names}}' | grep -q "^${ACTIVE_CONTAINER}$"; then
        error "Active backend container (${ACTIVE_CONTAINER}) is not running"
        # Fix: the recovery hint must match the active service (backend-new
        # needs the rolling-update profile) and use the detected compose command.
        if [ "$ACTIVE_CONTAINER" = "lottery-backend-new" ]; then
            error "Please start it first: ${DOCKER_COMPOSE_CMD} -f ${COMPOSE_FILE} --profile rolling-update up -d backend-new"
        else
            error "Please start it first: ${DOCKER_COMPOSE_CMD} -f ${COMPOSE_FILE} up -d backend"
        fi
        exit 1
    fi

    log "✅ Prerequisites check passed"
    log "Active backend: ${ACTIVE_CONTAINER} on port ${ACTIVE_PORT}"
    log "New backend will use: ${STANDBY_CONTAINER} on port ${STANDBY_PORT}"
}
|
||||
|
||||
#######################################
# Build the image for the standby backend service.
# Both services share a Dockerfile, but the service that will be started must
# be the one built, so its image cache carries the latest migrations.
# Globals:
#   PROJECT_DIR, COMPOSE_FILE, DOCKER_COMPOSE_CMD, STANDBY_PORT (read)
#   SERVICE_TO_BUILD (written)
#######################################
build_new_image() {
    log "Building new backend image..."

    cd "$PROJECT_DIR"

    # The backend-new service only exists under the rolling-update profile.
    local profile_opts=""
    if [ "$STANDBY_PORT" = "8082" ]; then
        SERVICE_TO_BUILD="backend-new"
        profile_opts="--profile rolling-update"
    else
        SERVICE_TO_BUILD="backend"
    fi

    log "Building service: ${SERVICE_TO_BUILD} (for port ${STANDBY_PORT})..."

    # $profile_opts is intentionally unquoted: it expands to zero or two words.
    # Build output is mirrored to a log file for post-mortem inspection.
    if $DOCKER_COMPOSE_CMD -f "$COMPOSE_FILE" $profile_opts build "$SERVICE_TO_BUILD" 2>&1 | tee /tmp/rolling-update-build.log; then
        log "✅ New backend image built successfully"
    else
        error "Failed to build new backend image"
        exit 1
    fi
}
|
||||
|
||||
#######################################
# Start the standby backend container on STANDBY_PORT.
# Removes any stale container of the same name first so migrations re-run on
# a fresh instance, then verifies the container survived its first seconds.
# Globals:
#   PROJECT_DIR, COMPOSE_FILE, DOCKER_COMPOSE_CMD, STANDBY_PORT (read)
#   SERVICE_NAME, CONTAINER_NAME (written)
#######################################
start_new_container() {
    log "Starting new backend container on port ${STANDBY_PORT}..."

    cd "$PROJECT_DIR"

    # Map the standby slot to its compose service / container name.
    local profile_opts=""
    if [ "$STANDBY_PORT" = "8082" ]; then
        SERVICE_NAME="backend-new"
        CONTAINER_NAME="lottery-backend-new"
        profile_opts="--profile rolling-update"
    else
        SERVICE_NAME="backend"
        CONTAINER_NAME="lottery-backend"
    fi

    # A pre-existing standby container (running or stopped) is removed so the
    # new one starts fresh with the just-built image.
    # $profile_opts is intentionally unquoted: zero or two words.
    if docker ps -a --format '{{.Names}}' | grep -q "^${CONTAINER_NAME}$"; then
        if docker ps --format '{{.Names}}' | grep -q "^${CONTAINER_NAME}$"; then
            warn "${CONTAINER_NAME} container is already running, stopping it first..."
        else
            warn "${CONTAINER_NAME} container exists but is stopped, removing it for fresh start..."
        fi
        $DOCKER_COMPOSE_CMD -f "$COMPOSE_FILE" $profile_opts stop "$SERVICE_NAME" || true
        $DOCKER_COMPOSE_CMD -f "$COMPOSE_FILE" $profile_opts rm -f "$SERVICE_NAME" || true
    fi

    # Bring up the new container.
    if $DOCKER_COMPOSE_CMD -f "$COMPOSE_FILE" $profile_opts up -d "$SERVICE_NAME"; then
        log "✅ New backend container started"
    else
        error "Failed to start new backend container"
        exit 1
    fi

    # Give the JVM a head start before the health-check loop begins polling.
    log "Waiting for container to initialize (Spring Boot startup can take 60+ seconds)..."
    sleep 10

    # Fail fast if the container crashed right after starting.
    if ! docker ps --format '{{.Names}}' | grep -q "^${CONTAINER_NAME}$"; then
        error "Container ${CONTAINER_NAME} stopped immediately after start. Check logs:"
        error " docker logs ${CONTAINER_NAME}"
        exit 1
    fi
}
|
||||
|
||||
# Health check new container
#
# Polls the standby backend's readiness endpoint (HEALTH_CHECK_URL, set by
# detect_active_backend) up to HEALTH_CHECK_RETRIES times. Between attempts
# it re-verifies the container is still running and periodically dumps
# container status/logs for operator visibility.
# Returns: 0 when healthy, 1 when the container died or retries ran out.
health_check_new_container() {
    log "Performing health check on new backend container (port ${STANDBY_PORT})..."

    # First, check if container is still running
    # ('local' inside an if-branch is valid bash: scope is the function)
    if [ "$STANDBY_PORT" = "8082" ]; then
        local container_name="lottery-backend-new"
    else
        local container_name="lottery-backend"
    fi

    if ! docker ps --format '{{.Names}}' | grep -q "^${container_name}$"; then
        error "Container ${container_name} is not running!"
        error "Check logs: docker logs ${container_name}"
        return 1
    fi

    # Check container health status ("none" when the image has no HEALTHCHECK)
    local health_status=$(docker inspect --format='{{.State.Health.Status}}' "${container_name}" 2>/dev/null || echo "none")
    if [ "$health_status" != "none" ]; then
        info "Container health status: $health_status"
    fi

    local retries=0
    while [ $retries -lt $HEALTH_CHECK_RETRIES ]; do
        # Check if container is still running (it may crash mid-startup)
        if ! docker ps --format '{{.Names}}' | grep -q "^${container_name}$"; then
            error "Container ${container_name} stopped during health check!"
            error "Check logs: docker logs ${container_name}"
            return 1
        fi

        # Try health check; -f makes curl fail on HTTP >= 400
        if curl -sf "$HEALTH_CHECK_URL" > /dev/null 2>&1; then
            log "✅ New backend container is healthy"
            return 0
        fi

        retries=$((retries + 1))
        if [ $retries -lt $HEALTH_CHECK_RETRIES ]; then
            # Show container status every 5 attempts
            if [ $((retries % 5)) -eq 0 ]; then
                info "Health check failed (attempt $retries/$HEALTH_CHECK_RETRIES)"
                info "Container status: $(docker ps --filter name=${container_name} --format '{{.Status}}')"
                info "Last 5 log lines:"
                docker logs --tail 5 "${container_name}" 2>&1 | sed 's/^/ /'
            else
                info "Health check failed (attempt $retries/$HEALTH_CHECK_RETRIES), retrying in ${HEALTH_CHECK_INTERVAL}s..."
            fi
            sleep $HEALTH_CHECK_INTERVAL
        fi
    done

    # All retries exhausted: dump extensive diagnostics before giving up.
    error "Health check failed after $HEALTH_CHECK_RETRIES attempts"
    error "New backend container is not responding at $HEALTH_CHECK_URL"
    error ""
    error "Container status:"
    docker ps --filter name=${container_name} --format 'table {{.Names}}\t{{.Status}}\t{{.Ports}}' || true
    error ""
    error "Last 200 log lines:"
    docker logs --tail 200 "${container_name}" 2>&1 | sed 's/^/ /'
    error ""
    error "To debug, keep container running and check:"
    error " docker logs -f ${container_name}"
    error " docker logs --tail 500 ${container_name} # For even more logs"
    error " curl -v $HEALTH_CHECK_URL"
    return 1
}
|
||||
|
||||
# Update Nginx configuration
#
# Rewrites the "upstream backend" block so STANDBY_PORT becomes the primary
# server and ACTIVE_PORT becomes the backup, then validates with `nginx -t`.
# A copy of the original config is saved to NGINX_CONF_BACKUP and restored on
# any failure. The heredoc delimiter is unquoted on purpose: the shell
# expands $NGINX_CONF / $STANDBY_PORT / $ACTIVE_PORT into the Python source.
update_nginx_config() {
    log "Updating Nginx configuration to point to new backend (port ${STANDBY_PORT})..."

    # Backup current config
    cp "$NGINX_CONF" "$NGINX_CONF_BACKUP"
    log "Backed up Nginx config to: $NGINX_CONF_BACKUP"

    # Use Python for reliable config manipulation
    # Pass variables directly to Python (not via sys.argv)
    python3 << PYTHON_SCRIPT
import re
import sys

config_file = "$NGINX_CONF"
standby_port = "$STANDBY_PORT"
active_port = "$ACTIVE_PORT"

try:
    # Read the entire file
    with open(config_file, 'r') as f:
        lines = f.readlines()

    # Find and update upstream block
    new_lines = []
    in_upstream = False
    upstream_start_idx = -1
    upstream_end_idx = -1
    keepalive_line = None
    keepalive_idx = -1

    # First pass: find upstream block boundaries
    for i, line in enumerate(lines):
        if re.match(r'^\s*upstream\s+backend\s*\{', line):
            upstream_start_idx = i
            in_upstream = True
        elif in_upstream and re.match(r'^\s*\}', line):
            upstream_end_idx = i
            break
        elif in_upstream and re.search(r'keepalive', line):
            keepalive_line = line
            keepalive_idx = i

    if upstream_start_idx == -1 or upstream_end_idx == -1:
        raise Exception("Could not find upstream backend block")

    # Build new lines
    for i, line in enumerate(lines):
        if i < upstream_start_idx:
            # Before upstream block - keep as is
            new_lines.append(line)
        elif i == upstream_start_idx:
            # Start of upstream block
            new_lines.append(line)
        elif i > upstream_start_idx and i < upstream_end_idx:
            # Inside upstream block
            # Skip old server lines
            if re.search(r'server\s+127\.0\.0\.1:808[02]', line):
                continue
            # Skip keepalive (we'll add it at the end)
            if re.search(r'keepalive', line):
                continue
            # Keep comments and other lines
            new_lines.append(line)
        elif i == upstream_end_idx:
            # Before closing brace - add server lines and keepalive
            new_lines.append(f" server 127.0.0.1:{standby_port};\n")
            new_lines.append(f" server 127.0.0.1:{active_port} backup;\n")
            if keepalive_line:
                new_lines.append(keepalive_line)
            else:
                new_lines.append(" keepalive 200;\n")
            new_lines.append(line)
        else:
            # After upstream block - keep as is
            new_lines.append(line)

    # Write updated config
    with open(config_file, 'w') as f:
        f.writelines(new_lines)

    print("Nginx config updated successfully")

except Exception as e:
    print(f"Error updating Nginx config: {e}", file=sys.stderr)
    import traceback
    traceback.print_exc()
    sys.exit(1)
PYTHON_SCRIPT

    # NOTE(review): if this script runs under 'set -e', a non-zero python3
    # exit would abort before reaching this $? check — confirm the top-of-file
    # shell options.
    if [ $? -ne 0 ]; then
        error "Failed to update Nginx config"
        cp "$NGINX_CONF_BACKUP" "$NGINX_CONF"
        exit 1
    fi

    # Test Nginx configuration; restore the backup if validation fails
    if nginx -t; then
        log "✅ Nginx configuration is valid"
    else
        error "Nginx configuration test failed, restoring backup..."
        error "Error details:"
        nginx -t 2>&1 | sed 's/^/ /'
        error ""
        error "Current config (first 50 lines):"
        head -50 "$NGINX_CONF" | sed 's/^/ /'
        cp "$NGINX_CONF_BACKUP" "$NGINX_CONF"
        exit 1
    fi
}
|
||||
|
||||
#######################################
# Reload Nginx to switch traffic to the new backend (zero downtime).
# On reload failure the previous config is restored from NGINX_CONF_BACKUP
# and nginx is reloaded again before exiting.
# Globals: NGINX_CONF, NGINX_CONF_BACKUP, STANDBY_PORT (read)
#######################################
reload_nginx() {
    log "Reloading Nginx (zero downtime)..."

    if systemctl reload nginx; then
        log "✅ Nginx reloaded successfully"
        # Fix: the new backend is on STANDBY_PORT, which is 8080 when the
        # previous deployment left 8082 active — don't hardcode "8082".
        log "✅ Traffic is now being served by new backend (port ${STANDBY_PORT})"
    else
        error "Failed to reload Nginx, restoring backup config..."
        cp "$NGINX_CONF_BACKUP" "$NGINX_CONF"
        systemctl reload nginx
        exit 1
    fi
}
|
||||
|
||||
#######################################
# Stop the old backend container after a connection-drain grace period.
# The container is stopped but not removed, so a manual rollback can simply
# restart it.
# Globals:
#   GRACE_PERIOD, ACTIVE_CONTAINER, PROJECT_DIR, COMPOSE_FILE,
#   DOCKER_COMPOSE_CMD (read)
#######################################
stop_old_container() {
    log "Waiting ${GRACE_PERIOD}s grace period for active connections to finish..."
    sleep $GRACE_PERIOD

    log "Stopping old backend container (${ACTIVE_CONTAINER})..."

    cd "$PROJECT_DIR"

    # Map the outgoing container back to its compose service; backend-new
    # lives under the rolling-update profile.
    local profile_opts="" old_service="backend"
    if [ "$ACTIVE_CONTAINER" = "lottery-backend-new" ]; then
        profile_opts="--profile rolling-update"
        old_service="backend-new"
    fi

    # $profile_opts is intentionally unquoted: zero or two words.
    if $DOCKER_COMPOSE_CMD -f "$COMPOSE_FILE" $profile_opts stop "$old_service"; then
        log "✅ Old backend container stopped"
    else
        warn "Failed to stop old backend container gracefully"
    fi

    # Deliberately NOT removing the stopped container so it remains available
    # for rollback; uncomment to remove it instead:
    # $DOCKER_COMPOSE_CMD -f "$COMPOSE_FILE" $profile_opts rm -f "$old_service"
}
|
||||
|
||||
# Rollback function
#
# Restores the pre-deployment state after a failure: puts the backed-up nginx
# config back, stops (and normally removes) the freshly started standby
# container, restarts the previously active container if needed, then exits
# non-zero. When KEEP_FAILED_CONTAINER=true the failed container is kept
# (stopped) for post-mortem debugging.
rollback() {
    error "Rolling back to previous version..."

    # Check KEEP_FAILED_CONTAINER (check both current env and script-level variable)
    # SCRIPT_KEEP_FAILED_CONTAINER survives sudo environment scrubbing.
    local keep_container="${KEEP_FAILED_CONTAINER:-false}"
    if [ "$keep_container" != "true" ] && [ "${SCRIPT_KEEP_FAILED_CONTAINER:-false}" = "true" ]; then
        keep_container="true"
    fi

    # Restore Nginx config so traffic immediately returns to the old backend
    if [ -f "$NGINX_CONF_BACKUP" ]; then
        cp "$NGINX_CONF_BACKUP" "$NGINX_CONF"
        systemctl reload nginx
        log "✅ Nginx config restored"
    fi

    # Stop new container (but keep it for debugging if KEEP_FAILED_CONTAINER is set)
    cd "$PROJECT_DIR"
    if [ "$keep_container" = "true" ]; then
        warn ""
        warn "═══════════════════════════════════════════════════════════════"
        warn "KEEP_FAILED_CONTAINER=true - Container will be KEPT for debugging"
        warn "═══════════════════════════════════════════════════════════════"
        if [ "$STANDBY_PORT" = "8082" ]; then
            $DOCKER_COMPOSE_CMD -f "$COMPOSE_FILE" --profile rolling-update stop backend-new || true
            warn ""
            warn "Container 'lottery-backend-new' is STOPPED but NOT REMOVED"
            warn ""
            warn "To check logs:"
            warn " docker logs lottery-backend-new"
            warn " docker logs --tail 100 lottery-backend-new"
            warn ""
            warn "To remove manually:"
            warn " $DOCKER_COMPOSE_CMD -f $COMPOSE_FILE --profile rolling-update rm -f backend-new"
        else
            $DOCKER_COMPOSE_CMD -f "$COMPOSE_FILE" stop backend || true
            warn ""
            warn "Container 'lottery-backend' is STOPPED but NOT REMOVED"
            warn ""
            warn "To check logs:"
            warn " docker logs lottery-backend"
            warn " docker logs --tail 100 lottery-backend"
            warn ""
            warn "To remove manually:"
            warn " $DOCKER_COMPOSE_CMD -f $COMPOSE_FILE rm -f backend"
        fi
        warn "═══════════════════════════════════════════════════════════════"
    else
        # Normal path: stop and remove the failed standby container entirely
        if [ "$STANDBY_PORT" = "8082" ]; then
            $DOCKER_COMPOSE_CMD -f "$COMPOSE_FILE" --profile rolling-update stop backend-new || true
            $DOCKER_COMPOSE_CMD -f "$COMPOSE_FILE" --profile rolling-update rm -f backend-new || true
        else
            $DOCKER_COMPOSE_CMD -f "$COMPOSE_FILE" stop backend || true
            $DOCKER_COMPOSE_CMD -f "$COMPOSE_FILE" rm -f backend || true
        fi
    fi

    # Start old container if it was stopped ('start' for an existing
    # container, falling back to 'up -d' if it was removed)
    if ! docker ps --format '{{.Names}}' | grep -q "^${ACTIVE_CONTAINER}$"; then
        if [ "$ACTIVE_CONTAINER" = "lottery-backend-new" ]; then
            $DOCKER_COMPOSE_CMD -f "$COMPOSE_FILE" --profile rolling-update start backend-new || \
            $DOCKER_COMPOSE_CMD -f "$COMPOSE_FILE" --profile rolling-update up -d backend-new
        else
            $DOCKER_COMPOSE_CMD -f "$COMPOSE_FILE" start backend || \
            $DOCKER_COMPOSE_CMD -f "$COMPOSE_FILE" up -d backend
        fi
    fi

    error "Rollback completed"
    exit 1
}
|
||||
|
||||
# Main deployment flow
#
# Orchestrates the rolling update: prerequisites -> build -> start standby ->
# health check -> switch nginx -> reload -> stop old backend. On any error
# before the switch completes, rollback() restores the previous state.
main() {
    log "Starting rolling update deployment..."

    # Trap errors for rollback
    # NOTE(review): without 'set -E', an ERR trap is not inherited inside
    # functions — confirm the top-of-file shell options cover this.
    trap rollback ERR

    check_prerequisites
    build_new_image
    start_new_container

    # Explicit check: health_check_new_container returns 1 rather than
    # triggering the ERR trap from inside an if-condition.
    if ! health_check_new_container; then
        rollback
    fi

    update_nginx_config
    reload_nginx

    # Clear error trap after successful switch — from here on, a failure to
    # stop the old container must not trigger a rollback.
    trap - ERR

    stop_old_container

    log "✅ Rolling update completed successfully!"
    log ""
    log "Summary:"
    log " - New backend is running on port ${STANDBY_PORT} (${STANDBY_CONTAINER})"
    log " - Nginx is serving traffic from new backend"
    log " - Old backend (${ACTIVE_CONTAINER}) has been stopped"
    log ""
    log "To rollback (if needed):"
    log " 1. Restore Nginx config: cp $NGINX_CONF_BACKUP $NGINX_CONF"
    log " 2. Reload Nginx: systemctl reload nginx"
    # NOTE(review): these hints hardcode 'docker-compose'; on hosts with only
    # the 'docker compose' plugin the operator must substitute accordingly.
    if [ "$ACTIVE_CONTAINER" = "lottery-backend-new" ]; then
        log " 3. Start old backend: docker-compose -f $COMPOSE_FILE --profile rolling-update start backend-new"
        log " 4. Stop new backend: docker-compose -f $COMPOSE_FILE stop backend"
    else
        log " 3. Start old backend: docker-compose -f $COMPOSE_FILE start backend"
        log " 4. Stop new backend: docker-compose -f $COMPOSE_FILE --profile rolling-update stop backend-new"
    fi
}

# Run main function
main "$@"
|
||||
|
||||
119
scripts/setup-logging.sh
Normal file
119
scripts/setup-logging.sh
Normal file
@@ -0,0 +1,119 @@
|
||||
#!/bin/bash
# Setup script for external logback-spring.xml on VPS
# This script extracts logback-spring.xml from the JAR and places it in the config directory
# MUST be run before starting Docker containers to create the required files

set -e

# Determine config directory based on current location.
# Fix: test the MORE specific path first. If /opt/app/backend/lottery-be
# exists then /opt/app/backend necessarily exists too, so checking the parent
# first made the lottery-be branch unreachable dead code.
if [ -d "/opt/app/backend/lottery-be" ]; then
    CONFIG_DIR="/opt/app/backend/lottery-be/config"
    LOG_DIR="/opt/app/logs"
elif [ -d "/opt/app/backend" ]; then
    CONFIG_DIR="/opt/app/backend/config"
    LOG_DIR="/opt/app/logs"
else
    # Not a standard VPS layout: derive the backend dir from this script's
    # own location (scripts/ is assumed to sit directly under the backend dir)
    SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
    BACKEND_DIR="$(cd "$SCRIPT_DIR/.." && pwd)"
    CONFIG_DIR="$BACKEND_DIR/config"
    LOG_DIR="/opt/app/logs"
fi
|
||||
|
||||
echo "Setting up external logging configuration..."
echo "Config directory: $CONFIG_DIR"
echo "Log directory: $LOG_DIR"

# Ensure the config and log directories exist and are world-traversable
# (config first, then logs — same order as before).
for required_dir in "$CONFIG_DIR" "$LOG_DIR"; do
    mkdir -p "$required_dir"
    chmod 755 "$required_dir"
done
||||
|
||||
# Extract logback-spring.xml from JAR if it doesn't exist.
# Resolution order: (1) a lottery-be-*.jar anywhere under the known backend
# dirs, (2) any *.jar under a target/ build dir, (3) the file straight from
# src/main/resources. Fails hard only when none of these yield the file.
if [ ! -f "$CONFIG_DIR/logback-spring.xml" ]; then
    echo "Extracting logback-spring.xml from JAR..."

    # Try multiple locations for JAR file (recursive find, first match wins)
    JAR_PATH=""
    for search_path in "/opt/app/backend" "/opt/app/backend/lottery-be" "$(dirname "$CONFIG_DIR")" "$(dirname "$(dirname "$CONFIG_DIR")")"; do
        if [ -d "$search_path" ]; then
            found_jar=$(find "$search_path" -name "lottery-be-*.jar" -type f 2>/dev/null | head -n 1)
            if [ -n "$found_jar" ]; then
                JAR_PATH="$found_jar"
                break
            fi
        fi
    done

    # Try to find in target directory (local Maven/Gradle build output)
    if [ -z "$JAR_PATH" ]; then
        for search_path in "/opt/app/backend" "/opt/app/backend/lottery-be" "$(dirname "$CONFIG_DIR")"; do
            if [ -d "$search_path/target" ]; then
                found_jar=$(find "$search_path/target" -name "*.jar" -type f | head -n 1)
                if [ -n "$found_jar" ]; then
                    JAR_PATH="$found_jar"
                    break
                fi
            fi
        done
    fi

    if [ -z "$JAR_PATH" ]; then
        echo "Warning: JAR file not found. Trying to copy from source..."
        # If JAR not found, copy from source (if available)
        for search_path in "/opt/app/backend" "/opt/app/backend/lottery-be" "$(dirname "$CONFIG_DIR")"; do
            if [ -f "$search_path/src/main/resources/logback-spring.xml" ]; then
                cp "$search_path/src/main/resources/logback-spring.xml" "$CONFIG_DIR/logback-spring.xml"
                echo "Copied from source: $search_path/src/main/resources/logback-spring.xml"
                break
            fi
        done

        if [ ! -f "$CONFIG_DIR/logback-spring.xml" ]; then
            echo "Error: Cannot find logback-spring.xml in JAR or source."
            echo "Please ensure the file exists or copy it manually to: $CONFIG_DIR/logback-spring.xml"
            exit 1
        fi
    else
        echo "Found JAR: $JAR_PATH"
        # Extract from JAR: Spring Boot fat-JAR layout first, then a plain
        # JAR layout; on both failing, fall back to the source tree.
        unzip -p "$JAR_PATH" BOOT-INF/classes/logback-spring.xml > "$CONFIG_DIR/logback-spring.xml" 2>/dev/null || \
        unzip -p "$JAR_PATH" logback-spring.xml > "$CONFIG_DIR/logback-spring.xml" 2>/dev/null || {
            echo "Warning: Could not extract from JAR. Trying to copy from source..."
            # Try copying from source
            for search_path in "/opt/app/backend" "/opt/app/backend/lottery-be" "$(dirname "$CONFIG_DIR")"; do
                if [ -f "$search_path/src/main/resources/logback-spring.xml" ]; then
                    cp "$search_path/src/main/resources/logback-spring.xml" "$CONFIG_DIR/logback-spring.xml"
                    break
                fi
            done

            if [ ! -f "$CONFIG_DIR/logback-spring.xml" ]; then
                echo "Error: Cannot extract or find logback-spring.xml."
                echo "Please copy it manually to: $CONFIG_DIR/logback-spring.xml"
                exit 1
            fi
        }
        # NOTE(review): this message also prints when the source-copy fallback
        # inside the braces succeeded, not only on a real JAR extraction.
        echo "Extracted from JAR: $JAR_PATH"
    fi

    echo "logback-spring.xml created at $CONFIG_DIR/logback-spring.xml"
else
    echo "logback-spring.xml already exists at $CONFIG_DIR/logback-spring.xml"
fi
|
||||
|
||||
# Set proper permissions: world-readable config, owned by the invoking user.
chmod 644 "$CONFIG_DIR/logback-spring.xml"
# Ownership change is best-effort (errors silenced, e.g. when $USER is unset).
chown $USER:$USER "$CONFIG_DIR/logback-spring.xml" 2>/dev/null || true

# Final summary for the operator.
printf '%s\n' \
    "Logging configuration setup complete!" \
    "" \
    "Configuration file: $CONFIG_DIR/logback-spring.xml" \
    "Log directory: $LOG_DIR" \
    "" \
    "You can now edit $CONFIG_DIR/logback-spring.xml to change log levels at runtime." \
    "Changes will take effect within 30 seconds (no restart needed)."
||||
|
||||
Reference in New Issue
Block a user